diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index d02a4fffb14115ddc51f843614587511411cd738..c1d21d6e7aa3567c2cfcf96840c79af793c09340 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -173,20 +173,13 @@ class RVVBuiltin {
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
                           Ops[1]->getType()};
         Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+        // insert undef passthru
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
         break;
       }
       }],
@@ -1321,6 +1316,8 @@ multiclass RVVPseudoVNotBuiltin {
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
                           Ops[1]->getType()};
         Ops.insert(Ops.begin() + 1,
                    llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+        // insert undef passthru
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
         break;
       }
       }],
@@ -1369,6 +1366,8 @@ multiclass RVVPseudoVFUnaryBuiltin {
         IntrinsicTypes = {ResultType, Ops[0]->getType(),
                           Ops[1]->getType()};
         Ops.insert(Ops.begin() + 1, Ops[0]);
+        // insert undef passthru
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
         break;
       }
       }],
@@ -1402,6 +1401,8 @@ multiclass RVVPseudoVWCVTBuiltin {
         IntrinsicTypes = {ResultType, Ops[0]->getType(),
                           cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
                           Ops[1]->getType()};
         Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        // insert undef passthru
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
         break;
       }
       }],
@@ -1438,6 +1439,8 @@ multiclass RVVPseudoVNCVTBuiltin {
         IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops[1]->getType(),
                           Ops[1]->getType()};
         Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        // insert undef passthru
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
         break;
       }
       }],
@@ -1583,15 +1586,18 @@ defm : RVVIndexedSegStore<"vsoxseg">;
 
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
+let HasNoMaskPassThru = true in {
 defm vadd : RVVIntBinBuiltinSet;
 defm vsub : RVVIntBinBuiltinSet;
 defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
                                  [["vx", "v", "vve"],
                                   ["vx", "Uv", "UvUvUe"]]>;
+}
 defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
 
 // 12.2. Vector Widening Integer Add/Subtract
 // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+let HasNoMaskPassThru = true in {
 defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
 defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
 // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1603,6 +1609,7 @@ defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
 // Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
 defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
 defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+}
 defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
                                           [["Uw", "UwUv"]]>;
 defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
@@ -1633,12 +1640,15 @@ let HasMask = false, HasPolicy = false in {
 }
 
 // 12.5. Vector Bitwise Logical Instructions
+let HasNoMaskPassThru = true in {
 defm vand : RVVIntBinBuiltinSet;
 defm vxor : RVVIntBinBuiltinSet;
 defm vor : RVVIntBinBuiltinSet;
+}
 defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
 
 // 12.6. Vector Single-Width Bit Shift Instructions
+let HasNoMaskPassThru = true in {
 defm vsll : RVVShiftBuiltinSet;
 defm vsrl : RVVUnsignedShiftBuiltinSet;
 defm vsra : RVVSignedShiftBuiltinSet;
@@ -1646,6 +1656,7 @@ defm vsra : RVVSignedShiftBuiltinSet;
 
 // 12.7. Vector Narrowing Integer Right Shift Instructions
 defm vnsrl : RVVUnsignedNShiftBuiltinSet;
 defm vnsra : RVVSignedNShiftBuiltinSet;
+}
 defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
                                          [["v", "vw"], ["Uv", "UvUw"]]>;
@@ -1665,6 +1676,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet;
 }
 
 // 12.9. Vector Integer Min/Max Instructions
+let HasNoMaskPassThru = true in {
 defm vminu : RVVUnsignedBinBuiltinSet;
 defm vmin : RVVSignedBinBuiltinSet;
 defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1685,9 +1697,10 @@ defm vdivu : RVVUnsignedBinBuiltinSet;
 defm vdiv : RVVSignedBinBuiltinSet;
 defm vremu : RVVUnsignedBinBuiltinSet;
 defm vrem : RVVSignedBinBuiltinSet;
+}
 
 // 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], HasNoMaskPassThru = true in {
 defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
                                     [["vv", "w", "wvv"],
                                      ["vx", "w", "wve"]]>;
@@ -1751,6 +1764,7 @@ let HasMask = false, HasPolicy = false in {
 
 // 13. Vector Fixed-Point Arithmetic Instructions
 // 13.1. Vector Single-Width Saturating Add and Subtract
+let HasNoMaskPassThru = true in {
 defm vsaddu : RVVUnsignedBinBuiltinSet;
 defm vsadd : RVVSignedBinBuiltinSet;
 defm vssubu : RVVUnsignedBinBuiltinSet;
@@ -1800,6 +1814,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
                                        [["vv", "w", "wvv"],
                                         ["vf", "w", "wve"]]>;
 }
+}
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
 defm vfmacc : RVVFloatingTerBuiltinSet;
@@ -1827,6 +1842,7 @@ def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
 def vfrec7 : RVVFloatingUnaryVVBuiltin;
 
 // 14.11. Vector Floating-Point MIN/MAX Instructions
+let HasNoMaskPassThru = true in {
 defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
@@ -1834,6 +1850,7 @@ defm vfmax : RVVFloatingBinBuiltinSet;
 defm vfsgnj : RVVFloatingBinBuiltinSet;
 defm vfsgnjn : RVVFloatingBinBuiltinSet;
 defm vfsgnjx : RVVFloatingBinBuiltinSet;
+}
 defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
@@ -2005,6 +2022,7 @@ defm vslideup : RVVSlideBuiltinSet;
 defm vslidedown : RVVSlideBuiltinSet;
 
 // 17.3.3. Vector Slide1up Instructions
+let HasNoMaskPassThru = true in {
 defm vslide1up : RVVSlideOneBuiltinSet;
 defm vfslide1up : RVVFloatingBinVFBuiltinSet;
@@ -2027,6 +2045,7 @@ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
                                  [["vx", "Uv", "UvUvz"]]>;
 defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
+}
 
 // 17.5. Vector Compress Instruction
 let HasMask = false, HasPolicy = false,
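For context on what the codegen change above means in practice: every nomask builtin under HasNoMaskPassThru now prepends a passthru operand to the emitted IR intrinsic call and fills it with undef, while the source-level intrinsic API is unchanged. Below is a minimal sketch (not part of the patch) using the non-overloaded intrinsic naming of this era; the wrapper name add_i8m1 is illustrative only:

#include <riscv_vector.h>
#include <stddef.h>

// After this patch, the unmasked call below is emitted as
//   %r = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
//            <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1,
//            <vscale x 8 x i8> %op2, i64 %vl)
// i.e. the only difference is the leading undef passthru operand.
vint8m1_t add_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vadd_vv_i8m1(op1, op2, vl);
}

The test updates that follow check exactly this: each unmasked CHECK line gains an `undef` first argument of the result type.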
diff --git a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
index dab41f1695e2e9e8c1710effdd2f18053a7807b1..e8445d40766f035dccbffe43c212839236aaeac5 100644
--- a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
+++ b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
@@ -25,7 +25,7 @@ vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2);
 // CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[OP0_ADDR]], align 1
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[OP1_ADDR]], align 1
 // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[VL_ADDR]], align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
 // CHECK-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[RET]], align 1
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[RET]], align 1
 // CHECK-NEXT: ret <vscale x 8 x i8> [[TMP4]]
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index 9b6b966723e3985746d7a2724abd9707f7f7d6b8..8e7c4a76e8c4db6886fa0964a870afd9c2235dd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vaadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t 
vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, 
int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t 
op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c index e3ff19a31948374233fb01495b813dd86461116c..bc05aa00acc8976011b549ca7c397ce2b65b0ced 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -34,7 
+34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t 
vl) { @@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
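// For reference, the vadd_vv_i8m1 check pair above with its scalable-vector
// types written out in full; this assumes only the standard LLVM intrinsic
// name mangling, under which nxv8i8 denotes <vscale x 8 x i8>:
//
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]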
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // 
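// A minimal sketch of the C pattern these unmasked checks cover, assuming the
// overloaded API from <riscv_vector.h>; the helper name add_i64m1 is
// illustrative and not part of the test file:
#include <riscv_vector.h>

vint64m1_t add_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  // The unmasked builtin takes no destination operand at the C level, so the
  // lowering now materializes an explicit undef passthru as the intrinsic's
  // first IR argument, which is what the updated CHECK lines verify.
  return vadd(op1, op2, vl);
}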
CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ 
-403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], 
i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ vuint32mf2_t 
test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c index 909da393f7665f3685b14850d1d14dea01fc158b..9bddb61f8cf3e85c4e2f1d62f917581bff62d820 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
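// The remaining vand checks follow the same pattern as vadd: every unmasked
// call gains undef as the intrinsic's first operand. Sketched with the
// overloaded API (the helper name and_i8m1 is illustrative):
vint8m1_t and_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  // Vector-scalar form; per the checks in this file it is expected to lower
  // to @llvm.riscv.vand.nxv8i8.i8.i64 with an undef first argument.
  return vand(op1, op2, vl);
}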
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, 
size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t 
op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
index 8def45998e1b830f44cf36fa541a9a5c14fa7a59..98570c85b4150655a900b376d2153d00b8c1eb06 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, 
size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t 
test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) 
{
@@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
index e5f8d3f645c04a7b9c9196abadba675ff925860f..e0e804dc6baffd4dbd90c3aa9098ae23c04fe941 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( 
undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ 
vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( 
undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
index f784db297373a421b8a227ac45167e6677067bb4..7d098fa95e12539e43122d916963d4ee0d7bdc2c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index 0d7c70918390547c9f708c8310fba7ca496bf79d..455c0dcb934793c5d04fe023dae96cc6b0dec5eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { @@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { @@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float 
op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { @@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { @@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { @@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { @@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c index f8c882ee23f31306d7c8aa2946512916783f7ec3..05e3acb4cf3cfc6576df4d3a2e7144eb1a84a98b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c index 1ae5f289ca667d3666551df8e7de0de818f12a3a..c8f874db1b2a28ea2fc72b3e5d6ce7961d1b3e95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // 
CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t 
op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c index 560d03030b8ba2b195a3b35e78edffd85243a6ac..2328aafb913114653b4eb04ff305704f548d5966 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 6051fcf8dceecb251679911cb4e0130d0c4af263..c5f9992f2baf560bd3bcdd979373ad15062c6548 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
index b2f4fb383db444b35de93bf341776ce8bdf25f80..c688b4c9ae1bc3c130bf987b4b215eb0dc2af270 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl)
{ // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c index 8c19f7bd147f9caa0f4adc5472680f6c9e0710e2..ed789ae936926db0e92af40a980ca9c6b8deea2c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -43,7 +43,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c index e407104b992a25f3e5bdc054ef3efec6f129a2d1..e975dae29dd15c03a01c0259def23332e388049a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -43,7 +43,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c index e5fa14a92604b9db10a6621a4653c171dc127e52..a1e2c0c24a24ad9c5d2dbd61201d1d7df3cab227 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -178,7 +178,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -188,7 +188,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -207,7 +207,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -216,7 +216,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -226,7 +226,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -235,7 +235,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -245,7 +245,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -254,7 +254,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -264,7 +264,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -273,7 +273,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -283,7 +283,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -292,7 +292,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -302,7 +302,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -311,7 +311,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -321,7 +321,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -340,7 +340,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -349,7 +349,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -359,7 +359,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -378,7 +378,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -387,7 +387,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -397,7 +397,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -406,7 +406,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -416,7 +416,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -425,7 +425,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -435,7 +435,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -444,7 +444,7 @@ 
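// Illustrative, not part of the patch: the vfneg checks earlier in this patch
// lower to vfsgnjn with the source operand repeated, i.e. negation is
// sign-injection of a value with its own inverted sign. The wrapper name
// `negate_f32m1` is hypothetical; the overloaded vfsgnjn/vfneg names are
// assumed from the API these tests target.
#include <riscv_vector.h>

vfloat32m1_t negate_f32m1(vfloat32m1_t x, size_t vl) {
  // Unmasked, so the lowered @llvm.riscv.vfsgnjn call now carries an undef
  // passthru operand; this is equivalent to vfneg(x, vl).
  return vfsgnjn(x, x, vl);
}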
vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -454,7 +454,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -473,7 +473,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -482,7 +482,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -492,7 +492,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -501,7 +501,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -511,7 +511,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c index e70c63f05770aecf35496af6c5db6b786a9e9c9e..550c4e2085fd93f5b4fa407df86a7b65e3492fbc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, @@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, @@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, @@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, @@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, @@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c index 989fd283b920f2defa7f6878433c7087a59ee99e..794ecc7a52830b71a8d55d85c6f3ae75265be29e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], 
float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, @@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, @@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, @@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, @@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ vfloat64m4_t 
test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
index 2df9853fdfe5447e85a73029520a27f40ad90c4c..5cf294713fdc3d4c1ee163e15bf3cee1efc40767 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
index 2a5bf9f06f114b3ef9e96633af97d63e74097e50..73d50704347e4d30059db81bbe9b0476751ad23e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index 8d5396c32933eea6bc05b0c6300e4995c914a1a3..fe0eebfd952ab2b1115564c753cbfb0a5a21c6af 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index 2dd4350fa9c7a238d72e0c9f8b0cabb1416e2411..4d712614bc6496ef023125cbc19e3e8aaa4ea6ec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index ee0948836af2a5d89f617259fef888611eb3c747..7d836785e57f1bca6401f5ce8d05a667c5c0761e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64(
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, 
uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index 5c269c17f1ea5cd10e2f1e372ef7780e134108fe..0c6bf71d57fc7aaf9057a48faf828a5d017f89a6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, 
size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
index a69e943a6a2bc6a68c668c50f48c83ab1fb927f6..722699306bc7487cd59009454da7e39533019ca0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@
-162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
index b08b304c7ef5efe26609be01591d3c5387e67a1d..a93c1d919ec8bf65dd7ac4679d8a7adfb32c1c66 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]],
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl 
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
<vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index 29ccbe114c8b6146b092530b2a9309436d5fe90b..bd2af8f9d2ed982419e492b1ec70d457fd6e6574 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, 
vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) 
{ @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, 
[[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, 
vuint32m1_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
@@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index d25b1b4c414a32924778e3b7efd111130815b84c..104a5746c1e42b016d8e1b29d6b36a925f437e34 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t
vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { // 
CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { @@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { @@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { @@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { // 
CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
index a52e5677e01fcb225a21dfa0e6f1ff35109fbe37..d6d3ea9188a31279e5fcca490918cc76ed6a801a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1,
size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t 
op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index 642055f09fab460712b1649303ea0e621632b25b..696d89216491961fb02a36bb4ef283bf72c26977 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl)
@@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl)
{ // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t 
op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( 
undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 
(vuint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c index c911a400be40a6ab9f350e1631bc3f860452849e..f580d8ccb4dbacd5b4974654ea4fe68158d30538 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, 
size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t 
shift, size_t vl) { @@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c index 315a6802b8ba08c31e1f33c63fb888f0cb346606..7a1b4618fe2f841bfea7d29b439ce4c413d589e6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c index 526f78ad58630f78ab22228d1e4f652a3811fbc6..44349ce19d20f3d910fa1637129fbc5d518b7831 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index ddff28ea9d6c14b4445ac880d32d339a283ecc51..e8041d4d1a9bc89050c75a54ed907c76474be23a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t
vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 
+159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index 7e4ef5e0ba95082ced6b718836aec24d9450e69f..1a2ddb813927f4b463e0adcdd157b7866394b207 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -17,7 +17,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
@@ -36,7 +36,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
@@ -45,7 +45,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
@@ -55,7 +55,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
@@ -64,7 +64,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -73,7 +73,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
@@ -82,7 +82,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -91,7 +91,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -100,7 +100,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -109,7 +109,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -118,7 +118,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -127,7 +127,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -136,7 +136,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -146,7 +146,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -155,7 +155,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -165,7 +165,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -174,7 +174,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -184,7 +184,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -193,7 +193,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -203,7 +203,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -212,7 +212,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -222,7 +222,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -231,7 +231,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -241,7 +241,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -250,7 +250,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -260,7 +260,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -269,7 +269,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
@@ -279,7 +279,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
@@ -288,7 +288,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
@@ -298,7 +298,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
@@ -307,7 +307,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
@@ -317,7 +317,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
@@ -326,7 +326,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
@@ -336,7 +336,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
@@ -345,7 +345,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
@@ -355,7 +355,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
@@ -364,7 +364,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
@@ -374,7 +374,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
@@ -383,7 +383,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
@@ -393,7 +393,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
@@ -402,7 +402,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
@@ -412,7 +412,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
@@ -421,7 +421,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
@@ -431,7 +431,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
@@ -440,7 +440,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
@@ -450,7 +450,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
@@ -459,7 +459,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
@@ -469,7 +469,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
@@ -478,7 +478,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -487,7 +487,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
@@ -496,7 +496,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -505,7 +505,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
@@ -514,7 +514,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -523,7 +523,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
@@ -532,7 +532,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -541,7 +541,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
@@ -550,7 +550,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
@@ -560,7 +560,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
@@ -570,7 +570,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
@@ -580,7 +580,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
@@ -590,7 +590,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
@@ -600,7 +600,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
@@ -619,7 +619,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
@@ -638,7 +638,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
@@ -647,7 +647,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
@@ -657,7 +657,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
@@ -666,7 +666,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
@@ -676,7 +676,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
@@ -686,7 +686,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
@@ -696,7 +696,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
@@ -705,7 +705,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
@@ -715,7 +715,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
@@ -724,7 +724,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
@@ -734,7 +734,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
@@ -743,7 +743,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
@@ -753,7 +753,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
@@ -762,7 +762,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
@@ -772,7 +772,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
@@ -791,7 +791,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
@@ -800,7 +800,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
@@ -810,7 +810,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
@@ -819,7 +819,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
@@ -829,7 +829,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
@@ -838,7 +838,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
@@
-848,7 +848,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -858,7 +858,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -868,7 +868,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -877,7 +877,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -887,7 +887,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -896,7 +896,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -906,7 +906,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -915,7 +915,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -925,7 +925,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -934,7 +934,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -944,7 +944,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -953,7 +953,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -963,7 +963,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -972,7 +972,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { // 
CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -982,7 +982,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -991,7 +991,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1001,7 +1001,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1020,7 +1020,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1030,7 +1030,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1040,7 +1040,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1077,7 +1077,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1087,7 +1087,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1097,7 +1097,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1107,7 +1107,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1117,7 +1117,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1127,7 +1127,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1137,7 +1137,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1147,7 +1147,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1157,7 +1157,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, 
vuint16m2_t op2, @@ -1167,7 +1167,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1177,7 +1177,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1187,7 +1187,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1197,7 +1197,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1207,7 +1207,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1217,7 +1217,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1227,7 +1227,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1247,7 +1247,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1257,7 +1257,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1267,7 +1267,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1277,7 +1277,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1287,7 +1287,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1297,7 +1297,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1307,7 +1307,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1317,7 +1317,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1327,7 +1327,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1337,7 +1337,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1347,7 +1347,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1357,7 +1357,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1367,7 +1367,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1377,7 +1377,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1387,7 +1387,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1397,7 +1397,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1407,7 +1407,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -1417,7 +1417,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1427,7 +1427,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1437,7 +1437,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1447,7 +1447,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1457,7 +1457,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -1467,7 +1467,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1477,7 +1477,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1487,7 +1487,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1497,7 
+1497,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1507,7 +1507,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c index d313ab0d0e0dc6d09e930a653ffe27286f8bf694..e5d8565ce1ab3ab03fdc229197b3604d2fae0cc5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( 
undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
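// NOTE (illustrative, not part of the original test file): the vsadd.c hunks
// below, covering both vsadd and vsaddu, make one mechanical change per test:
// the expected IR for the overloaded saturating-add intrinsics now carries an
// explicit undef passthru as the first intrinsic operand. A minimal sketch of
// the C usage these tests exercise; the wrapper name is hypothetical:
#include <riscv_vector.h>

vint32m1_t saturating_add_i32m1(vint32m1_t a, vint32m1_t b, size_t vl) {
  // Per the updated CHECK lines, Clang now lowers this to roughly:
  //   %0 = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> undef, <vscale x 2 x i32> %a,
  //            <vscale x 2 x i32> %b, i64 %vl)
  return vsadd(a, b, vl);
}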
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index a5418573ac47c6a345925916e4ff87359d4112a9..627b5c96cb10c1cdcab840072f9155e793fddbc6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL:
@test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ 
-375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // 
CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) 
{ @@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
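// NOTE (illustrative, not part of the original test file): the vslide1down.c
// hunks that follow apply the same change to the overloaded vslide1down
// intrinsic. A minimal sketch of the usage under test; the wrapper name is
// hypothetical:
#include <riscv_vector.h>

vint8m1_t slide1down_in_scalar_i8m1(vint8m1_t src, int8_t value, size_t vl) {
  // Per the updated CHECK lines, the expected lowering is roughly:
  //   %0 = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(
  //            <vscale x 8 x i8> undef, <vscale x 8 x i8> %src,
  //            i8 %value, i64 %vl)
  return vslide1down(src, value, vl);
}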
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index 0e200dc4cb54661fcfb0d70de030d29545f51f1e..b935758780a9c04c595b64b9eeec54ade91e18ea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@
-116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c index 
7d8056e097bbfab8d4c531eb75e9b4f6785930fb..bbd6d6554b92345b8f29b74d663c4d237fe6cd09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -370,7 
+370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c index 5101560b0e4e02160f1c4cec70584694612653da..d1193186e04cea606e605a400c074098a618ce47 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) 
{ @@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, 
size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -501,7 
+501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, 
size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
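Every vsll hunk above makes the same mechanical change: the unmasked builtin now passes a leading undef as the passthru operand of the @llvm.riscv.vsll intrinsic. A minimal usage sketch of what these tests exercise (my illustration, not part of the patch; it uses the overloaded, pre-__riscv_ naming this test suite uses):

#include <riscv_vector.h>

// Unmasked shift-left. There is no passthru parameter at the C level, so
// after this patch clang materializes the first intrinsic operand as
// undef, e.g. for i32m1:
//   call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, %op1, %shift, %vl)
vint32m1_t shift_left(vint32m1_t op1, size_t shift, size_t vl) {
  return vsll(op1, shift, vl);
}

Because the passthru is undef, codegen remains as free to clobber tail elements as before; the operand only carries real information once an actual merge value is passed instead.
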
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
index dc05b51b2a6aa8b48260d53930ddcafd1f95e6cc..f67a014f5b3dfdce36d13a1979b962aa61b159c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
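The EEW=64 vsmul cases live in a separate file, presumably so they can carry their own RUN prerequisites (the RUN lines are outside these hunks); the change itself is the same undef-passthru insertion. A sketch mirroring test_vsmul_vx_i64m1 above (my example, not from the patch):

#include <riscv_vector.h>

// Saturating fixed-point vector-scalar multiply at EEW=64; per the CHECK
// lines above this now lowers to
//   call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, %op1, %op2, %vl)
vint64m1_t smul_i64(vint64m1_t op1, int64_t op2, size_t vl) {
  return vsmul(op1, op2, vl);
}
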
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
index 3905826cb0d80b774f613d126dcfe58cb6a042b2..7a0374cf0f273248b8f66cbc17ec68b25d7e80e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t 
op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
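For reference (my summary, not wording from the patch): vsmul is the saturating signed fixed-point multiply, roughly (op1 * op2) >> (SEW - 1) with the rounding mode taken from vxrm and saturation on overflow. A scalar model of one SEW=8 element under round-to-nearest-up, to make the semantics behind these tests concrete:

#include <stdint.h>

// Scalar sketch of a single vsmul element at SEW=8, assuming vxrm holds
// round-to-nearest-up (rnu). Illustrative only, not the normative spec.
static int8_t smul8_rnu(int8_t a, int8_t b) {
  int32_t prod = (int32_t)a * (int32_t)b;    // full-width product
  int32_t rounded = (prod + (1 << 6)) >> 7;  // shift by SEW-1 with rounding
  if (rounded > INT8_MAX) return INT8_MAX;   // only -128 * -128 saturates
  if (rounded < INT8_MIN) return INT8_MIN;
  return (int8_t)rounded;
}
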
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
index c7855bb0d02d018b394ccf778905bbc1f64e1ed8..6455f3804bc8e9e386dcff61a13d4dd4f8722665 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
 vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index bf0e538fc331bfe6489058ddc16e4731ac6de9af..968377dc5ffd54dcea16981761b995e3f2b3db0d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
index 2427f374cdb396192e7f20cf9ce90314094ca8ff..7ce1fe3917b63dbf89ed2c71481fd46c8aede70d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
index d393fa1214fc58218e9983c813d779e4348e0c3b..d723aa470c6ae67eccb1965fb4e89b40fa0578ed 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t
test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t 
vl) { @@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c index 04b91eb785da26df46b5654b94ce4beb1f268890..e547e43221a1d403aadb70fc2c4b0063a8f1fa4a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, 
vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t 
test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
@@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 720ddadf5123b97d6268ba1d237b3424341fd324..7cb60680fe769c49630bb98dcfd7140fd519b1fa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
index 7d66d0940473e93ca5aa6272ac8b7c776be3a964..73d9fd62c19a110d4b2bc190ef1b5e0d961c3b34 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2,
size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] //
vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t
test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32>
@llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL:
@test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT:
ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64>
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t
test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16>
undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32>
[[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64>
[[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c index 172ea75b07b42363d5c2fbd391e369cfed831fc0..f431ea506d28c93d75d6e860d79be6b142fe5e4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT:
[[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c index 3e6eaaa02da9fc94f6799999592373ac92028a89..09e6e487cee7e051e0226eccb73721405bfff550 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
<vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7
+105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint64m1_t
test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t
test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] // vuint32m4_t
test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
<vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -627,7 +627,7 @@ vint16m4_t
test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c index 9080a62a47bad3b6efd8196a2030375430f00149..d355f29105463a1ac9b6134e37a18f426e52c01b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, 
size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t 
test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 
@@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t 
test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ 
-940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ 
-1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
index 52d0d079453a15ca3a7183379626b2489ff0323d..d00be9ae74c8b1cfd3b48be7414d041a93df410e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7
+87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t 
op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], 
i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t 
op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t 
op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, 
size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
index cf6281261b4214a78b2449df306e5009c813143e..77afacbcac10f8a50014d64f290a95545edb2efa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t 
test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
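// Editorial sketch, not part of the patch: the signed vaadd hunks above and
// the unsigned vaaddu hunks below are updated identically, and the
// vector-scalar (vx) forms gain the same undef passthru as the vector-vector
// (vv) forms. Assuming the builtin naming these tests use:

#include <stddef.h>
#include <riscv_vector.h>

vuint8mf8_t aaddu_sketch(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  // Expected IR per the updated checks:
  //   call @llvm.riscv.vaaddu.nxv1i8.i8.i64(undef, op1, op2, vl)
  return vaaddu_vx_u8mf8(op1, op2, vl);
}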
@@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2,
size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index f8070ce5a8312fdedeac6e3b4ab929168203e254..bcfbcb07de7d5d9670c3a8fd7f481a8a9c824df7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
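// Editorial sketch, not part of the patch: vadd.c below repeats the pattern
// for plain integer add. Only the unmasked entry points change; the first
// intrinsic operand becomes an undef passthru of the result vector type.
// Assuming the builtin naming these tests use:

#include <stddef.h>
#include <riscv_vector.h>

vint8mf8_t add_sketch(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  // Expected IR per the updated checks:
  //   call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(undef, op1, op2, vl)
  return vadd_vv_i8mf8(op1, op2, vl);
}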
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index 54f63cd2bf58df2284ccb7245c3cec0ab760a944..26b8e9a2446d30280acab31ebf17e03fbd8d1831 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, 
vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t 
// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
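// ---------------------------------------------------------------------------
// Illustrative sketch (reviewer note, not part of the patch): every hunk in
// these test files is the same mechanical update. With HasNoMaskPassThru set
// on a builtin, the unmasked form now passes an explicit undef passthru
// (merge) operand as the first argument of the underlying IR intrinsic.
// Assuming the explicit intrinsic name vadd_vv_i32m1 (hypothetical here; the
// vadd test file itself is not shown in this excerpt), a call such as
//
//   vint32m1_t f(vint32m1_t a, vint32m1_t b, size_t vl) {
//     return vadd_vv_i32m1(a, b, vl);
//   }
//
// is expected to change from roughly
//   [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[A]], [[B]], i64 [[VL]])
// to
//   [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[A]], [[B]], i64 [[VL]])
// i.e. only the leading undef operand is new. The masked variants, which
// already carry a merge operand, appear unchanged in this patch.
// ---------------------------------------------------------------------------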
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 311881eb701924446d6b047cbd722d91b705c217..ac701156a5621125158c1ce7c2c4db16610e4c54 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c index 7d18db0cad68620d97517a57eec1cd5b5dc52bf6..3451d362e0bb66aa1bd89db44abda05a9c26b1d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
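// ----------------------------------------------------------------------------
// [review note] Every hunk in vasubu.c above makes the same mechanical change:
// the lowered call gains a leading `undef` passthru operand, consistent with
// the passthru insertion earlier in this patch. A minimal sketch of what one
// of these tests exercises; demo_vasubu is a hypothetical name, and this
// assumes the unprefixed intrinsic names and the <riscv_vector.h> header used
// throughout this test suite:
#include <riscv_vector.h>

vuint32m1_t demo_vasubu(vuint32m1_t op1, uint32_t op2, size_t vl) {
  // After this patch the call is checked as
  //   @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, %op1, i32 %op2, i64 %vl)
  // i.e. the result is seeded from undef rather than from a tied source.
  return vasubu_vx_u32m1(op1, op2, vl);
}
// ----------------------------------------------------------------------------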
ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, 
size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, 
vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c index 18495dc881ca3ccfa478476d4e71d30def72ceeb..94a75de02b53e07dab3d93fe70a277c76f770a83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
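// ----------------------------------------------------------------------------
// [review note] vdiv.c and vdivu.c (above) follow the identical pattern for
// the signed and unsigned divide forms. A hypothetical sketch, under the same
// naming assumptions as the note above:
#include <riscv_vector.h>

vint8m1_t demo_vdiv(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // Now checked as:
  //   @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, %op1, %op2, i64 %vl)
  return vdiv_vv_i8m1(op1, op2, vl);
}

vuint8m1_t demo_vdivu(vuint8m1_t op1, uint8_t op2, size_t vl) {
  // Now checked as:
  //   @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, %op1, i8 %op2, i64 %vl)
  return vdivu_vx_u8m1(op1, op2, vl);
}
// ----------------------------------------------------------------------------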
@llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index 4757cd9827aa60025af2f94eeb75193d5ece6951..e67009a3fbad99a69e40a338961dde086b6d24f6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef,
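// ----------------------------------------------------------------------------
// [review note] vfabs_v is a pseudo-builtin: as the checks above show, it
// expands to vfsgnjx with op1 passed twice, and it now also receives the
// leading undef passthru. Hypothetical sketch, same assumptions as above:
#include <riscv_vector.h>

vfloat32m1_t demo_vfabs(vfloat32m1_t op1, size_t vl) {
  // Now checked as:
  //   @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, %op1, %op1, i64 %vl)
  return vfabs_v_f32m1(op1, vl);
}
// ----------------------------------------------------------------------------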
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index 4757cd9827aa60025af2f94eeb75193d5ece6951..e67009a3fbad99a69e40a338961dde086b6d24f6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
@@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
@@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
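Both operand forms of vfadd change identically: the vv variant takes two vectors, the vf variant a vector and a scalar, and in each case the new first operand is the merge (passthru) value, where undef means no previous destination value needs to be preserved. A hedged sketch under the same naming assumptions as above:

```c
#include <riscv_vector.h>
#include <stddef.h>

// vv form: both operands are vectors.
vfloat32m1_t add_vv_sketch(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_vv_f32m1(op1, op2, vl);
}

// vf form: the scalar op2 keeps its position; only the undef passthru
// is prepended in the emitted intrinsic call.
vfloat32m1_t add_vf_sketch(vfloat32m1_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m1(op1, op2, vl);
}
```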
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index c2c5b12aeae0987e6127d668a1bf14c90a68cf8a..c7b25276571508ed09d29945c7809796f0be5d3a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
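For a non-commutative operation like vfdiv the existing operand order is untouched; the undef passthru is simply inserted before op1. A short sketch under the same assumptions:

```c
#include <riscv_vector.h>
#include <stddef.h>

// vf form of a non-commutative op: each active lane computes
// op1[i] / op2, exactly as before this patch; only the prepended
// undef passthru in the IR call is new.
vfloat64m2_t div_sketch(vfloat64m2_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m2(op1, op2, vl);
}
```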
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index 052c02faa5897d615e99d623f3330ee5504eee3a..299d4cb009cd9c05e76090b167dae8b2e255de30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
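Note that the hunks jump over the masked tests (from around line 170 to line 368 in each file): the _m variants already carry an explicit maskedoff passthru as their first argument, so their emitted IR is unchanged by this patch. A hedged sketch of such an unaffected call, under the same naming assumptions:

```c
#include <riscv_vector.h>
#include <stddef.h>

// Masked variant: maskedoff is the existing explicit passthru, which is
// why no undef needs to be prepended and these checks are not in the diff.
vfloat32m1_t max_masked_sketch(vbool32_t mask, vfloat32m1_t maskedoff,
                               vfloat32m1_t op1, vfloat32m1_t op2,
                               size_t vl) {
  return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}
```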
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
index 29c2b4c676021a398da1e96bc4a8450e37ce3fb4..3ef7f0abe6b6332c925557e7022846c08c0e0347 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index 50dc40329922197755664df658d77268b5139c83..64ed62716e8b7b6dbb3da74c1c4d7ec2746e7106 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
 // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index c201f27cc32f35a59e0fd4790b655b8f697b728c..92c9bce681abcdc8a9381cee13edc1925b39c01f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
index 7428f43db11b79a8fe6dea474aacf92692ed9233..12974a81c09369f3b1c4ddfdcc420433090c0fe9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
index 277b803b9d6877a7400c097e5ac93344fa31ae38..ab485498bd051b56ad849caa4ccab4901d0fc07f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -189,7 +189,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -208,7 +208,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -217,7 +217,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -227,7 +227,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -236,7 +236,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -246,7 +246,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -255,7 +255,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -274,7 +274,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -284,7 +284,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -293,7 +293,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -303,7 +303,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -312,7 +312,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -322,7 +322,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -331,7 +331,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -341,7 +341,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -360,7 +360,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -369,7 +369,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -379,7 +379,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -388,7 +388,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -398,7 +398,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -407,7 +407,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -417,7 +417,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -426,7 +426,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -436,7 +436,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -455,7 +455,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -464,7 +464,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -474,7 +474,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -483,7 +483,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -493,7 +493,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -502,7 +502,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t 
op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -512,7 +512,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1099,7 +1099,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1108,7 +1108,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1126,7 +1126,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1135,7 +1135,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1144,7 +1144,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1153,7 +1153,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1162,7 +1162,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1171,7 +1171,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1189,7 +1189,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8 
(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1198,7 +1198,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1216,7 +1216,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1225,7 +1225,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1234,7 +1234,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1243,7 +1243,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1252,7 +1252,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1261,7 +1261,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1279,7 +1279,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1288,7 +1288,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 
op2, size_t vl) { @@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1324,7 +1324,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1333,7 +1333,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1342,7 +1342,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1351,7 +1351,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1360,7 +1360,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1369,7 +1369,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1378,7 +1378,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1396,7 +1396,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1405,7 +1405,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 9c01c32bbf96babea1d5018eb998b437732bb014..624e55f32a292b07af52686e9af735ae01ecd568 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
@@ -203,7 +203,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -212,7 +212,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, si
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -221,7 +221,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, si
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -230,7 +230,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -239,7 +239,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -248,7 +248,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index 389c762266d8881d133774ecadb52eca97373901..493473e6bcc56e88724c2eb107802513461a02a5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
@@ -198,7 +198,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -207,7 +207,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -216,7 +216,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -225,7 +225,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -234,7 +234,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -243,7 +243,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index 88a4c9aadb856069ba045c14f5398906b557ddc5..bbb00daab419b5a98d1d461c8d97d39b4951ba50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]],
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t 
test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( undef, 
[[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c index 87f59f848330547ee974e6b09d96cf926df364b7..818c16262678cbe8f51bbf980cd8e79278c5721e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ 
-113,7 +113,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, 
_Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c index 0254680b38d2e2c883dff5968610b4095eff092d..49ca097e060b33b3b61f0190c2ce90913eb59062 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -168,7 +168,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -177,7 +177,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -186,7 +186,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -204,7 +204,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -213,7 +213,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: 
@test_vfwmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -222,7 +222,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -231,7 +231,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c index 410b3c89738b150d6c3dcd6c46cd5565c91c2f65..e295290fe1f5c14192e402489ac21e19fe00d6ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t 
op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -113,7 +113,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl // 
CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c index 17a4c55b87babae8ae1de6f17936c67b03eab881..6c24e04056c806fe7027967fa5df10a2a0585f63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
//
vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
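The vmin.c hunks below follow exactly the same pattern as the vmax.c hunks above: every unmasked builtin now lowers to an intrinsic call whose first operand is an undef passthru. As a minimal sketch of what these tests exercise (assuming the pre-`__riscv_`-prefix builtin naming that this test suite uses), a plain unmasked call looks like:

  #include <stddef.h>
  #include <riscv_vector.h>

  // Unmasked vmax.vv: the C builtin has no destination operand, so after
  // this patch codegen passes undef as the passthru (merge) operand, e.g.
  //   call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, op1, op2, vl)
  vint8m1_t max_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
    return vmax_vv_i8m1(op1, op2, vl);
  }
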
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
index bb31db3465e65fe8f7ffe458691c77055a4f1d57..db1a83f366185d763c70d23637e460e5c9c11c4e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL:
@test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, 
size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ 
vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c index 97686762db6664f79bf31a569f75af74779312b2..48f138669bfd88932c6878aaac2e0ab5f8e1b25a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, 
uint64_t op2, size_t vl) { @@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c index c2c4522ca174b2554bf96e11bcca0b32f5c15375..2e0c093f81d4c5faa38ee0e6e52ca96b5348a543 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, 
vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t 
test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, 
vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -906,7 +906,7 @@ 
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], 
i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c index 81dc0a7ee0d8f1d7046ce53090838ea25b87576d..582b4f3d2680bd40471ec009ca9464287ac02133 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t 
op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ vuint8mf2_t 
test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c index 6bdb8baec8ab88e158424b02baf0a41613e73d04..3b5dd626c29d5db72f56ffdb585e4a5a6977dbab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c @@ -6,7 +6,7 @@ // 
CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { @@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { @@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { @@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c index e83e29b7244c5a8f19d1485d0948b21ddc339590..8501b4c2e1f064fbd3f246901a7de46dc650abe1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { // 
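The vneg blocks above and below check the analogous expansion for negation: vneg_v is emitted as @llvm.riscv.vrsub (reverse subtract) with a zero scalar, computing 0 - op1, again with `undef` in the new first-operand position. A hedged sketch of one such wrapper, with the body the diff elides reconstructed by assumption (helper name illustrative):

    #include <riscv_vector.h>

    // Vector negation; the CHECK lines verify the lowering to
    // @llvm.riscv.vrsub with scalar 0 and an undef passthru operand.
    vint8mf8_t negate_example(vint8mf8_t op1, size_t vl) {
      return vneg_v_i8mf8(op1, vl);
    }
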
CHECK-RV64-LABEL: @test_vneg_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( 
undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { // 
CHECK-RV64-LABEL: @test_vneg_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c index 0736b3c35f461994a6b429c5bc22458c0eee4eb7..e8a975946a394368ca967458039e36be63939ef5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
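The vnot blocks follow the same scheme via @llvm.riscv.vxor with an all-ones scalar, so bitwise NOT is computed as op1 ^ -1 with the `undef` passthru prepended. One more minimal sketch under the same assumptions (helper name illustrative):

    #include <riscv_vector.h>

    // Bitwise NOT of every element; the CHECK lines verify the lowering
    // to @llvm.riscv.vxor with scalar -1 and an undef passthru operand.
    vuint8m4_t bitwise_not_example(vuint8m4_t op1, size_t vl) {
      return vnot_v_u8m4(op1, vl);
    }
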
// vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], 
i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c index f86d5353acd91be4aa7a69a49cda9a4fb47814d1..c3cfb74a529985db3aff85398f6ec5ecd0736ff1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t 
shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
index 33d3479ec7abef64475fe84f29ec0dce69e777a3..92743610fd1b640d70ab6deab36f5b402ffbebc3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
index 95630044f5e3b0193e3b33c9d2d3d304ac398c4e..ee25469f6ffcafc91ae6ab5212164967aa014475 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
index 01024f14a0407d5659b61ca032e8d1f0365b86b6..3164f1d57923bf987df76a983a90cb7b7045f080 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret
[[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t 
test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ 
vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) 
{
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index afc7638555f82e303367f586dd06c33a73571c89..6e787cd485c738311843ecbd1c3b6798fa9737bb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -18,7 +18,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -27,7 +27,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL:
@test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -37,7 +37,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -46,7 +46,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -56,7 +56,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -65,7 +65,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -74,7 +74,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -83,7 +83,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, 
vuint8m2_t index, size_t vl) {
@@ -92,7 +92,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -101,7 +101,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -110,7 +110,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -119,7 +119,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -128,7 +128,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -137,7 +137,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -147,7 +147,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -156,7 +156,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -166,7 +166,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -175,7 +175,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -185,7 +185,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -194,7 +194,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -204,7 +204,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -213,7 +213,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL:
@test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -223,7 +223,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -232,7 +232,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -242,7 +242,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -251,7 +251,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -261,7 +261,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -270,7 +270,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t
test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -280,7 +280,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -289,7 +289,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -299,7 +299,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { @@ -308,7 +308,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -318,7 +318,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -327,7 +327,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -337,7 +337,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -346,7 +346,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -356,7 +356,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -365,7 +365,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -375,7 +375,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -384,7 +384,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -394,7 +394,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -403,7 +403,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -413,7 +413,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -422,7 +422,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -432,7 +432,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -441,7 +441,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, @@ -451,7 +451,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, 
vuint8mf2_t index, @@ -470,7 +470,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -479,7 +479,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -488,7 +488,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { @@ -497,7 +497,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -506,7 +506,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -515,7 +515,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -524,7 +524,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -533,7 +533,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -542,7 +542,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -551,7 +551,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -561,7 +561,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -571,7 +571,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -581,7 +581,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -591,7 +591,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, // CHECK-RV64-LABEL: 
@test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -601,7 +601,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -610,7 +610,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -620,7 +620,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -629,7 +629,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -639,7 +639,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -648,7 +648,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -658,7 +658,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -667,7 +667,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -677,7 +677,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -687,7 +687,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -697,7 +697,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -706,7 +706,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -716,7 +716,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { @@ -725,7 +725,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -735,7 +735,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -744,7 +744,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -754,7 +754,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -763,7 +763,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -773,7 +773,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -782,7 +782,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, 
size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -792,7 +792,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -801,7 +801,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -811,7 +811,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ -820,7 +820,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -830,7 +830,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -839,7 +839,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -849,7 +849,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -859,7 +859,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -869,7 +869,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -878,7 +878,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -888,7 +888,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -897,7 +897,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -907,7 +907,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -916,7 +916,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -926,7 +926,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -935,7 +935,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -945,7 +945,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -954,7 +954,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -964,7 +964,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, 
size_t vl) { @@ -973,7 +973,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -983,7 +983,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -992,7 +992,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1002,7 +1002,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1011,7 +1011,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1021,7 +1021,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1031,7 +1031,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1041,7 +1041,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1078,7 +1078,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1088,7 +1088,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1098,7 +1098,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t 
op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1108,7 +1108,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1118,7 +1118,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1128,7 +1128,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1138,7 +1138,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1148,7 +1148,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1158,7 +1158,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1168,7 +1168,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1178,7 +1178,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1188,7 +1188,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1198,7 +1198,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1208,7 +1208,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1218,7 +1218,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1228,7 +1228,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1238,7 +1238,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1248,7 +1248,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1258,7 +1258,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1268,7 +1268,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1278,7 +1278,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1288,7 +1288,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1298,7 +1298,7 @@ 
vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1308,7 +1308,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1318,7 +1318,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1328,7 +1328,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1338,7 +1338,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1348,7 +1348,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1358,7 +1358,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1368,7 +1368,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1378,7 +1378,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1388,7 +1388,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1398,7 +1398,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1408,7 +1408,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -1418,7 +1418,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1428,7 +1428,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1438,7 +1438,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1448,7 +1448,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1458,7 +1458,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -1468,7 +1468,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1478,7 +1478,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1488,7 +1488,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1498,7 +1498,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1508,7 +1508,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, @@ -3212,7 +3212,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { @@ -3221,7 +3221,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, si // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) { @@ -3230,7 +3230,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t v // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { @@ -3239,7 +3239,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, si // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) { @@ -3248,7 +3248,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t v // CHECK-RV64-LABEL: 
@test_vrgather_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) { @@ -3257,7 +3257,7 @@ vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) { @@ -3266,7 +3266,7 @@ vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) { @@ -3275,7 +3275,7 @@ vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) { @@ -3284,7 +3284,7 @@ vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) { @@ -3293,7 +3293,7 @@ vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) { @@ -3302,7 +3302,7 @@ vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) { @@ -3311,7 +3311,7 @@ vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) { @@ -3320,7 +3320,7 @@ vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -3329,7 +3329,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -3338,7 +3338,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -3347,7 +3347,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -3356,7 +3356,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, 
vuint16m4_t op2, size_t vl) {
@@ -3365,7 +3365,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
index d7688d2642c4d3d6a32ad4b28d664fcf5fd6cd3e..b5f951ff8c828d03fe93fcbc2fceebe83ef26e68 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2,
@@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2,
size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( 
undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, 
size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
index abfe106708dcdf24b2b6fc54c75f39ecd8a2a5cb..166bc19fca8d899be886197570669ff146a14dd4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 
+384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { 
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { 
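Every hunk in these test files follows one shape: the unmasked intrinsic call gains a leading passthru operand of the result type, filled with `undef`, while the source operands and the trailing `vl` operand are unchanged; only the unmasked test functions appear in these hunks. As a minimal sketch of what one such test exercises, assuming the non-overloaded, pre-`__riscv_`-prefix intrinsic naming these files use (the function name `demo_vsaddu_vx_u64m1` is illustrative only, and the `<vscale x 1 x i64>` operand types in the comment are reconstructed from the `nxv1i64` mangling):

#include <stddef.h>
#include <riscv_vector.h>

// Unmasked unsigned saturating add, vector-scalar form at LMUL=1.
// With this patch the expected IR becomes:
//   %0 = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(
//            <vscale x 1 x i64> undef,   ; newly inserted undef passthru
//            <vscale x 1 x i64> %op1, i64 %op2, i64 %vl)
vuint64m1_t demo_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vsaddu_vx_u64m1(op1, op2, vl);
}

The `undef` passthru plausibly signals that the tail and any inactive elements of the result may take any value, which is what these unmasked builtins promise; it also gives masked and unmasked intrinsics a uniform operand layout.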
@@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 867fe098640d71513f5a9b91f8586327410fdcf4..26b72ea52e98e036be59ba4dbcae2e43359be9bd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -6,7 +6,7 @@

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, 
int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: 
@test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 
[[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
index 61d7023eb431c0e7bfebe1cb11e9f27c1049b1e6..b903528b48e8369d9b9ffa7d98eac767cd5de429 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
@@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
@@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
@@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
index a7f2dec73689396dc6901f4d7cd07d19ff6ff1a9..67e7033896bb0db5bd5313d3c58a5dbfd7c2bf68 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
index 43983b15f18d5e19c9c30ba3c48b9622640b827f..c92c792049755e4a923edec63d3724f131effcb3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c index e1db9d589ec656b8797ed1b79a19a0098a48ee98..fd821e4cbd79c3c73b714f5570707d4626f404d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t 
op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ 
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c index ddf1f8bce3c971907da0d39f796f726256be3ce7..2b3352e3fc188d14b244969a00a487ee1e0fd96e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, 
size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 3150746b841dc1122a38d8063d2362835ba20fe8..c7d59a775a0677f3146c38bd584eadda91ff10e9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -6,7 +6,7 @@

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
 vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index 95e9cf843fc669bf72d70179b12d87f8899f5eb3..b14e0b210573810e1675eadde90185653e309479 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index fccf57aecfb1df92c3e1856460cc190e0477e9fc..464d78ce9d5690f1b459bd5a03e5e28777509ccf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
index dc16819e04db3b9f321d76264962f8e1488ae229..1ed8555c70cf95b78ea3332ae94ba54ec0e1c67a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]],
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t 
vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t 
op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t 
vl) {
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
index 4275d13f9d71479c1c39f0acb4310f4ff0cc7ada..30929ff6a773c11e7a90c7809c32b0bb6f3aa84d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
index 77036c4d6e7524f6ba4895670b67e24d65360ced..93f3581d87207f7278ccb4955024b11fdd5500d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 
@@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t 
test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ 
-940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ 
-1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c index 4c4c6a26861b3bc6f698985d7de35d61959810b9..03d06da6b0b9bcebc3d12119e8bd9e8f99112260 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { // 
CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
index 023133b8f331d4653d9ce8f6d4bb0cc0bee35c3f..3a86c65edb6de9bac15fc0fcb022feed5a6902be 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
index 9422cd4d1b6917e713ebb299e4e38f5681e175b1..f2b40986763831992a4dfe1141aa0d2dcc30a8c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t 
op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c index be076ea0b5039080d0ef47dcea313cc9b5ccfcae..0e775104dabd959b1bf6f7558e5602e706a85e95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t 
test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 
@@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ 
-411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t 
test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index eb718ff8a49ea89e5b9b0c4bcdf5f8d622952b18..9888e6c6862cf99b288e18094909002d2138f183 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -162,6 +162,7 @@ private:
   bool IsMask;
   bool HasVL;
   bool HasPolicy;
+  bool HasNoMaskPassThru;
   bool HasNoMaskedOverloaded;
   bool HasAutoDef; // There is an automatic definition in the header.
   std::string ManualCodegen;
@@ -177,8 +178,8 @@ public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
                StringRef MangledSuffix, StringRef IRName, bool IsMask,
                bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
-               bool HasNoMaskedOverloaded, bool HasAutoDef,
-               StringRef ManualCodegen, const RVVTypes &Types,
+               bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
+               bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<StringRef> &RequiredFeatures, unsigned NF);
   ~RVVIntrinsic() = default;
@@ -188,6 +189,7 @@ public:
   StringRef getMangledName() const { return MangledName; }
   bool hasVL() const { return HasVL; }
   bool hasPolicy() const { return HasPolicy; }
+  bool hasNoMaskPassThru() const { return HasNoMaskPassThru; }
   bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
@@ -770,12 +772,14 @@
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            StringRef NewMangledName, StringRef MangledSuffix,
                            StringRef IRName, bool IsMask,
                            bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
-                           bool HasNoMaskedOverloaded, bool HasAutoDef,
-                           StringRef ManualCodegen, const RVVTypes &OutInTypes,
+                           bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
+                           bool HasAutoDef, StringRef ManualCodegen,
+                           const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            const std::vector<StringRef> &RequiredFeatures,
                            unsigned NF)
     : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
+      HasNoMaskPassThru(HasNoMaskPassThru),
       HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
       ManualCodegen(ManualCodegen.str()), NF(NF) {
@@ -823,7 +827,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
   // IntrinsicTypes is the nonmasked version index. It needs to be updated
   // if there is a maskedoff operand (which is always the first operand).
   IntrinsicTypes = NewIntrinsicTypes;
-  if (IsMask && HasMaskedOffOperand) {
+  if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPassThru)) {
     for (auto &I : IntrinsicTypes) {
       if (I >= 0)
         I += NF;
@@ -860,6 +864,9 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
+  } else if (hasNoMaskPassThru()) {
+    OS << "  Ops.push_back(llvm::UndefValue::get(ResultType));\n";
+    OS << "  std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n";
   }
 
   OS << "  IntrinsicTypes = {";
@@ -1107,6 +1114,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
       PrintFatalError("Builtin with same name has different HasPolicy");
     else if (P.first->second->hasPolicy() != Def->hasPolicy())
       PrintFatalError("Builtin with same name has different HasPolicy");
+    else if (P.first->second->hasNoMaskPassThru() != Def->hasNoMaskPassThru())
+      PrintFatalError("Builtin with same name has different HasNoMaskPassThru");
     else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes())
       PrintFatalError("Builtin with same name has different IntrinsicTypes");
   }
@@ -1154,6 +1163,7 @@ void RVVEmitter::createRVVIntrinsics(
     bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
     bool HasVL = R->getValueAsBit("HasVL");
     bool HasPolicy = R->getValueAsBit("HasPolicy");
+    bool HasNoMaskPassThru = R->getValueAsBit("HasNoMaskPassThru");
     bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
     std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
     StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
@@ -1228,8 +1238,8 @@ void RVVEmitter::createRVVIntrinsics(
       Out.push_back(std::make_unique<RVVIntrinsic>(
           Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
           /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
-          HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
-          IntrinsicTypes, RequiredFeatures, NF));
+          HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+          Types.getValue(), IntrinsicTypes, RequiredFeatures, NF));
       if (HasMask) {
         // Create a mask intrinsic
         Optional<RVVTypes> MaskTypes =
@@ -1237,8 +1247,9 @@ void RVVEmitter::createRVVIntrinsics(
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
             /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
-            HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
-            MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF));
+            HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef,
+            ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes,
+            RequiredFeatures, NF));
       }
     } // end for Log2LMULList
   } // end for TypeRange
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 57b0f792ea5641fcb4a5f2d922bc362f016dee42..9ec8a02f3184704a3612b738a288642ef722f183 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -383,12 +383,13 @@ let TargetPrefix = "riscv" in {
     let VLOperand = 2;
   }
   // For destination vector type is the same as first and second source vector.
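[Editorial aside] The hasNoMaskPassThru() branch emitted above appends the undef passthru and then rotates it to the front rather than inserting at Ops.begin() directly. A minimal standalone sketch of that rotate idiom, with plain ints standing in for the operand values (the contents are made up for illustration):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      // Ops as the builtin sees them: op1, op2, vl (ints stand in for Value*).
      std::vector<int> Ops = {1, 2, 3};
      Ops.push_back(0); // 0 models the undef passthru appended by push_back
      // Rotating the reversed range by one moves the back element to the
      // front, yielding {0, 1, 2, 3}: passthru first, as the intrinsic now
      // expects.
      std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());
      assert(Ops.front() == 0 && Ops.back() == 3);
    }
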
- // Input: (vector_in, int_vector_in, vl) + // Input: (passthru, vector_in, int_vector_in, vl) class RISCVRGatherVVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int_vector_in, vl, ta) @@ -400,13 +401,14 @@ let TargetPrefix = "riscv" in { [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } - // Input: (vector_in, int16_vector_in, vl) + // Input: (passthru, vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int16_vector_in, vl, ta) @@ -421,12 +423,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is the same as first source vector, and the // second operand is XLen. - // Input: (vector_in, xlen_in, vl) + // Input: (passthru, vector_in, xlen_in, vl) class RISCVGatherVXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. @@ -440,13 +443,14 @@ let TargetPrefix = "riscv" in { let VLOperand = 4; } // For destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -461,12 +465,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -480,13 +485,14 @@ let TargetPrefix = "riscv" in { let VLOperand = 4; } // For destination vector type is NOT the same as first source vector. 
- // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -501,12 +507,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -595,13 +602,14 @@ let TargetPrefix = "riscv" in { } // For Saturating binary operations. // The destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -618,12 +626,13 @@ let TargetPrefix = "riscv" in { // For Saturating binary operations. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -640,12 +649,13 @@ let TargetPrefix = "riscv" in { // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. 
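[Editorial aside] Taken together, these intrinsic-class changes mean every nomask binary intrinsic now carries a leading passthru operand, with SplatOperand and VLOperand each shifted up by one. A hedged sketch of what a front end would now build for the nomask llvm.riscv.vadd; emitVAdd and its setup are illustrative only, and the overload types assumed here are the result vector, the second source, and the VL type:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/IntrinsicsRISCV.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Build a nomask vadd call whose first operand is the new (undef)
    // passthru; the remaining operands keep their old order.
    Value *emitVAdd(Module &M, IRBuilder<> &B, Value *Op1, Value *Op2,
                    Value *VL) {
      Type *VecTy = Op1->getType();
      Function *VAdd = Intrinsic::getDeclaration(
          &M, Intrinsic::riscv_vadd, {VecTy, Op2->getType(), VL->getType()});
      return B.CreateCall(VAdd, {UndefValue::get(VecTy), Op1, Op2, VL});
    }
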
// The destination vector type is NOT the same as first source vector (with mask). diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 8ddbb072656ad05b793f7dde795b82a7bf796210..53bdd4b64904c42750bd84969c237827fc7872d5 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4475,10 +4475,12 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, InsertI64VL); // First slide in the hi value, then the lo in underneath it. - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValHi, I32Mask, InsertI64VL); - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValLo, I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi, + I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo, + I32Mask, InsertI64VL); // Bitcast back to the right container type. ValInVec = DAG.getBitcast(ContainerVT, ValInVec); } @@ -4774,8 +4776,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // We need to special case these when the scalar is larger than XLen. unsigned NumOps = Op.getNumOperands(); bool IsMasked = NumOps == 7; - unsigned OpOffset = IsMasked ? 1 : 0; - SDValue Scalar = Op.getOperand(2 + OpOffset); + SDValue Scalar = Op.getOperand(3); if (Scalar.getValueType().bitsLE(XLenVT)) break; @@ -4790,7 +4791,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Convert the vector source to the equivalent nxvXi32 vector. MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); - SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset)); + SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2)); SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, DAG.getConstant(0, DL, XLenVT)); @@ -4807,17 +4808,34 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Shift the two scalar parts in using SEW=32 slide1up/slide1down // instructions. 
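[Editorial aside] Before reading the rewritten hunk below, a minimal model of the splitting it performs may help: on RV32 an i64 scalar cannot feed a single slide, so it goes in as two SEW=32 halves, hi first and then lo underneath it. This is a self-contained simulation, not the DAG code; slide1up32 is a made-up helper name:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Model vslide1up.vx at SEW=32: shift every element up one lane and
    // write the scalar into lane 0.
    static std::vector<uint32_t> slide1up32(const std::vector<uint32_t> &V,
                                            uint32_t X) {
      std::vector<uint32_t> R(V.size(), 0);
      R[0] = X;
      for (size_t I = 1; I < R.size(); ++I)
        R[I] = V[I - 1];
      return R;
    }

    int main() {
      std::vector<uint32_t> Vec(4, 0); // the i32 view of a 2 x i64 vector
      uint64_t Scalar = 0x1122334455667788ULL;
      Vec = slide1up32(Vec, uint32_t(Scalar >> 32)); // hi half goes in first
      Vec = slide1up32(Vec, uint32_t(Scalar));       // then lo lands in lane 0
      // Lanes 0 and 1 now hold the two halves of the i64 scalar.
      assert(Vec[0] == 0x55667788 && Vec[1] == 0x11223344);
    }
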
-    if (IntNo == Intrinsic::riscv_vslide1up ||
-        IntNo == Intrinsic::riscv_vslide1up_mask) {
-      Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
-                        I32Mask, I32VL);
-      Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
-                        I32Mask, I32VL);
+    SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1));
+    if (!IsMasked) {
+      if (IntNo == Intrinsic::riscv_vslide1up) {
+        Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+                          ScalarHi, I32Mask, I32VL);
+        Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+                          ScalarLo, I32Mask, I32VL);
+      } else {
+        Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+                          ScalarLo, I32Mask, I32VL);
+        Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+                          ScalarHi, I32Mask, I32VL);
+      }
     } else {
-      Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
-                        I32Mask, I32VL);
-      Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
-                        I32Mask, I32VL);
+      // TODO: These VSLIDE1 nodes could be TAMA because we use vmerge to
+      // select the maskedoff value.
+      SDValue Undef = DAG.getUNDEF(I32VT);
+      if (IntNo == Intrinsic::riscv_vslide1up_mask) {
+        Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
+                          ScalarHi, I32Mask, I32VL);
+        Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
+                          ScalarLo, I32Mask, I32VL);
+      } else {
+        Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
+                          ScalarLo, I32Mask, I32VL);
+        Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
+                          ScalarHi, I32Mask, I32VL);
+      }
     }
 
     // Convert back to nxvXi64.
@@ -4825,11 +4843,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     if (!IsMasked)
       return Vec;
 
-    // Apply mask after the operation.
     SDValue Mask = Op.getOperand(NumOps - 3);
     SDValue MaskedOff = Op.getOperand(1);
-    return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
+    // Assume the Policy operand is the last operand.
+    uint64_t Policy = Op.getConstantOperandVal(NumOps - 1);
+    // We don't need to select maskedoff if it's undef.
+    if (MaskedOff.isUndef())
+      return Vec;
+    // TAMU
+    if (Policy == RISCVII::TAIL_AGNOSTIC)
+      return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
+                         VL);
+    // TUMA or TUMU: currently we always emit a tumu policy regardless of
+    // tuma, which is fine because vmerge does not care about mask policy.
+    return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL);
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 2d1da98588d52d8293a832c60091e29a9d93683e..134d5fa89739fe6c356e8525dbd3e1cdc4006929 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -155,9 +155,9 @@ enum NodeType : unsigned {
   // and the fifth the VL.
   VSLIDEUP_VL,
   VSLIDEDOWN_VL,
-  // Matches the semantics of vslide1up/slide1down. The first operand is the
-  // source vector, the second is the XLenVT scalar value. The third and fourth
-  // operands are the mask and VL operands.
+  // Matches the semantics of vslide1up/slide1down. The first operand is the
+  // passthru operand, the second is the source vector, and the third is the
+  // XLenVT scalar value. The fourth and fifth operands are the mask and VL.
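[Editorial aside] On the policy handling added in the lowering hunk above: after the two slides, the masked form still has to put maskedoff back into the inactive lanes, and the choice between vselect and vmerge is what encodes the tail policy. A toy per-lane model of that final select, under the simplifying assumption that tail-agnostic lanes may hold anything (marked -1 here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<int32_t> applyMask(const std::vector<bool> &Mask,
                                   const std::vector<int32_t> &Computed,
                                   const std::vector<int32_t> &MaskedOff,
                                   size_t VL, bool TailAgnostic) {
      std::vector<int32_t> R = MaskedOff; // inactive body lanes keep maskedoff
      for (size_t I = 0; I < VL; ++I)
        if (Mask[I])
          R[I] = Computed[I];
      // vselect (VSELECT_VL) is tail-agnostic; vmerge (VP_MERGE_VL) keeps
      // maskedoff in the tail too, which is why it serves the TU policies.
      if (TailAgnostic)
        for (size_t I = VL; I < R.size(); ++I)
          R[I] = -1; // unspecified under TA
      return R;
    }
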
VSLIDE1UP_VL, VSLIDE1DOWN_VL, // Matches the semantics of the vid.v instruction, with a mask and VL diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 25b02824c145013bdd80dd84676ebc9d2afb6be6..72543485dc8d998a40cf03fe25b0e9f11713144f 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -996,6 +996,24 @@ class VPseudoBinaryNoMask(PseudoToVInst.VInst); } +class VPseudoBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. @@ -1017,6 +1035,25 @@ class VPseudoTiedBinaryNoMask(PseudoToVInst.VInst); } +class VPseudoTiedBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, + Op2Class:$rs1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 0; // Merge is also rs2. + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoIStoreNoMask LMUL, bit Ordered>: Pseudo<(outs), @@ -1652,6 +1689,8 @@ multiclass VPseudoBinary; + def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, RISCVMaskedPseudo; @@ -1681,6 +1720,8 @@ multiclass VPseudoBinaryEmul; + def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy; } @@ -1693,6 +1734,8 @@ multiclass VPseudoTiedBinary; + def "_" # MInfo.MX # "_TIED_TU": VPseudoTiedBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask; } @@ -2820,14 +2863,14 @@ class VPatUnaryAnyMask; -class VPatBinaryNoMask : +class VPatBinaryM : Pat<(result_type (!cast(intrinsic_name) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -2837,6 +2880,44 @@ class VPatBinaryNoMask; +class VPatBinaryNoMaskTA : + Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + +class VPatBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + // Same as above but source operands are swapped. 
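[Editorial aside] The new *_TU pseudo and pattern classes above give nomask intrinsics a way to honor a real (non-undef) passthru: the "$rd = $merge" constraint means the destination register starts out as the merge operand, so tail lanes survive the operation. A toy model of that tail-undisturbed contract (vaddTU is a made-up name):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Tail-undisturbed vadd: lanes below VL are computed, lanes at or beyond
    // VL are taken unchanged from the merge (passthru) operand.
    std::vector<int32_t> vaddTU(const std::vector<int32_t> &Merge,
                                const std::vector<int32_t> &A,
                                const std::vector<int32_t> &B, size_t VL) {
      std::vector<int32_t> R = Merge; // start from the merge register
      for (size_t I = 0; I < VL; ++I)
        R[I] = A[I] + B[I];
      return R;
    }

    int main() {
      std::vector<int32_t> Merge = {9, 9, 9, 9}, A = {1, 2, 3, 4}, B(4, 10);
      std::vector<int32_t> R = vaddTU(Merge, A, B, /*VL=*/2);
      assert(R[0] == 11 && R[1] == 12 && R[2] == 9 && R[3] == 9); // tail kept
    }
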
class VPatBinaryNoMaskSwapped : Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), (result_type result_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), @@ -2938,6 +3020,23 @@ class VPatTiedBinaryNoMask; +class VPatTiedBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TIED_TU") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + class VPatTiedBinaryMask { GPR:$vl, mti.Log2SEW)>; } -multiclass VPatBinary { - def : VPatBinaryNoMask; + def : VPatBinaryM; def : VPatBinaryMask; @@ -3176,8 +3275,10 @@ multiclass VPatBinaryTA { - def : VPatBinaryNoMask; + def : VPatBinaryNoMaskTA; + def : VPatBinaryNoMaskTU; def : VPatBinaryMaskTA; @@ -3349,9 +3450,9 @@ multiclass VPatBinaryV_VI { foreach mti = AllMasks in - def : VPatBinaryNoMask; + def : VPatBinaryM; } multiclass VPatBinaryW_VV; - let AddedComplexity = 1 in + def : VPatBinaryNoMaskTU; + let AddedComplexity = 1 in { + def : VPatTiedBinaryNoMaskTU; def : VPatTiedBinaryMask; + } def : VPatBinaryMaskTA { multiclass VPatBinaryM_VV vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinarySwappedM_VV vtilist> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryM; } } multiclass VPatBinaryM_VI vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinaryV_VV_VX_VI("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge), + (vti.Vector vti.RegClass:$rs2), + (vti.Vector vti.RegClass:$rs1), + VLOpFrag)), + (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU") + vti.RegClass:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs2, + GPR:$vl, + vti.Log2SEW)>; def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), @@ -4098,7 +4217,8 @@ foreach vti = AllIntegerVectors in { (XLenVT timm:$policy))>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. - def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)), + (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), VLOpFrag)), (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, @@ -4684,7 +4804,8 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors, foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. 
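// (Illustrative: with the pattern below, vsll.vi vd, vs, 1 selects // PseudoVADD_VV with both sources set to vs, i.e. it emits vadd.vv vd, vs, vs.)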
- def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), (XLenVT 1), VLOpFrag)), (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs1, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 7f5630456afece1a7b5e4bd25f821d630d0d51b5..cefdd4d3c6098fc899b904244dfed333c1b04352 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1643,9 +1643,10 @@ def SDTRVVSlide : SDTypeProfile<1, 5, [ SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT> ]>; -def SDTRVVSlide1 : SDTypeProfile<1, 4, [ - SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>, - SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT> +def SDTRVVSlide1 : SDTypeProfile<1, 5, [ + SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, + SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT> ]>; def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; @@ -1660,16 +1661,30 @@ foreach vti = AllIntegerVectors in { VLOpFrag)), (!cast("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), + (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), + (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; } foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in { diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll index 4216c5952eae89b2a2ccd9067537caa6577cf727..3e370c4e9a0bd134a9ea4ffec85591d5adbd38cc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -23,6 +23,7 @@ define @access_fixed_object(i64 *%val) { } declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , , , i64); @@ -54,6 +55,7 @@ define @access_fixed_and_vector_objects(i64 *%val) { %len = load i64, i64* %local %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, %v1, %v2, i64 %len) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll new file mode 100644 index 0000000000000000000000000000000000000000..0c24fcf42965c1e59e3c9b3eb07f31e76ef0a368 --- 
/dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -0,0 +1,115 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ +; RUN: < %s | FileCheck %s + +declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( + , + , + i64, + , + i32, + i32); + +define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 0) + + ret %a +} + +define @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 1) + + ret %a +} + + +; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic. +define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 2) + + ret %a +} + +; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic.
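+; (The trailing i32 policy immediate encodes the two RISCVII policy bits: +; 0 = tumu, 1 = tamu, 2 = tuma, 3 = tama. With tama and an undef merge +; operand the lowering needs no vmerge at all, as the next test shows.)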
+define @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + %2, + i32 %3, i32 3) + + ret %a +} + +define @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + undef, + i32 %2, i32 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index 314c41296c72a24608988398e4b6873181a7b881..1fe6141f006ed2311358ff295035a7d30855178d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -79,11 +79,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i32 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i32 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index c2e44e7e4936f19523fa7325770d4d1f76f12712..453aabd3466c2735da6ffa55b1ec9f961fc9afe4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -79,11 +79,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i64 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i64 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll index 3d314b1779a3ea7fc623d1c5b9f2f486a2ee2c4c..21bd0ee2a6bdbb989dd643334b83982c09d3a584 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32 -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 
-mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64 declare @llvm.riscv.vle.nxv1i8( , @@ -118,3 +118,2088 @@ entry: ret %a } + +declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vand.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vand.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vand.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vand.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen 
%3) nounwind { +; RV32-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdivu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdivu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfmax.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrdiv.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrdiv.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnjn.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnjn.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, 
e16, mf4, tu, mu +; RV32-NEXT: vfsgnjx.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnjx.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrsub.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrsub.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1down.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1down.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1up.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1up.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v9, v10 +; RV64-NEXT: ret 
+entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vl4re16.v v24, (a0) +; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v16, v24 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vl4re16.v v24, (a0) +; RV64-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v16, v24 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + + +declare @llvm.riscv.vslide1down.nxv1i64( + , + , + i64, + iXLen); + +define 
@intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1down.vx v10, v9, a0 +; RV32-NEXT: vslide1down.vx v8, v10, a1 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1down.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.nxv1i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1up.vx v10, v9, a1 +; RV32-NEXT: vslide1up.vx v8, v10, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1up.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmax.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmax.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmaxu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmaxu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; 
RV32-NEXT: vminu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vminu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulh.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulh.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulh.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhsu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhsu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnclip.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclip.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define 
@intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnclipu.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclipu.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsra.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsra.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsrl.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsrl.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vor.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vor.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vor.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vor.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrem.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrem.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrem.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i8.nxv1i8( + , + , + , + iXLen); +declare @llvm.riscv.vrgather.vv.nxv1i8.i32( + , + , + , + iXLen); + +define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # 
%entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv1i8.i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv1i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vx v8, v9, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv1i8( + %0, + %1, + iXLen %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrgatherei16.vv.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgatherei16.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgatherei16.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgatherei16.vv.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsub.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vrsub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsadd.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsadd.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsll.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsll.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsmul.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + 
iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vssub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: 
intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT:    vssubu.vv v8, v9, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT:    vssubu.vx v8, v9, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vsub.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vsub.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwadd.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwadd.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwadd.wv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwadd.wv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwaddu.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwaddu.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwmul.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwmul.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwmulu.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwmulu.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwmulsu.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwmulsu.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwsub.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwsub.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwsub.wv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwsub.wv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwsub.wv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwsub.wv v8, v8, v9
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwsubu.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwsubu.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vwsubu.wv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vwsubu.wv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vxor.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT:    vxor.vv v8, v9, v10
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
index 13eb36dd1a251e08580e57082f12faab239e813f..2d9f005c0d3730fcd44a35ac9fd5ce63b01d7f44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -47,6 +49,7 @@ entry:
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2)
@@ -92,6 +96,7 @@ entry:
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2)
@@ -137,6 +143,7 @@ entry:
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -182,6 +190,7 @@ entry:
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2)
@@ -227,6 +237,7 @@ entry:
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2)
@@ -272,6 +284,7 @@ entry:
 }
 
 declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32);
@@ -284,6
+297,7 @@ define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define 
@intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vaadd.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaadd.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaadd.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i32.i32( + , , i32, i32); @@ -1728,6 
+1805,7 @@ define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaadd.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaadd.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaadd.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaadd.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaadd.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaadd.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll index 4d692eb089c3f26c4f39c793f5a77cdf6b580574..76d556d6be40b2870c12e75154d93cc1c7a650db 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare 
@llvm.riscv.vaadd.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ 
-1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vaadd.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vaadd.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vaadd.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaadd.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaadd.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaadd.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vaadd.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaadd.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vaadd.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaadd.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare 
@llvm.riscv.vaadd.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaadd.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vaadd.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaadd.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vaadd.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaadd.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll index 4e796ebd4a32b030a0d1bdd2f83753441ac90627..5d8b24ed84866d30b13a935aa6c64aee23d35d2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( 
@llvm.riscv.vaaddu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaaddu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare 
@llvm.riscv.vaaddu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaaddu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaaddu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaaddu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaaddu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaaddu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaaddu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaaddu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaaddu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaaddu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaaddu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaaddu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaaddu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaaddu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaaddu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaaddu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaaddu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i64.i64( 
+ , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaaddu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll index 3f9ed340dede90b2bf54702e1014c206e1abb498..4fad38c164c729fb8808b14547d30b3663dbb7f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaaddu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( 
@llvm.riscv.vaaddu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaaddu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaaddu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaaddu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i16.i16( + , , i16, i64); 
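The declaration hunks above all apply one mechanical change: a new leading parameter with the same scalable vector type as the result. A minimal sketch of the resulting unmasked `.vx` shape, with the `<vscale x N x iM>` operand types spelled out as the intrinsic name implies (the annotations are illustrative, not part of the test files):

; Updated unmasked .vx declaration (vaaddu, SEW=16, smallest LMUL, RV64 file)
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
  <vscale x 1 x i16>,  ; passthru operand; the tests below pass undef here
  <vscale x 1 x i16>,  ; vector source operand
  i16,                 ; scalar source operand
  i64)                 ; VL operand (XLEN type, i64 in the rv64 files)

The masked variants already carried a merge operand, which is why only the unmasked declarations change in these hunks.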
@@ -1323,6 +1382,7 @@ define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaaddu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaaddu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaaddu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaaddu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaaddu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaaddu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaaddu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaaddu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaaddu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaaddu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaaddu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaaddu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaaddu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaaddu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vaaddu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaaddu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll index 57db5175df5484c69df14ac58abea8a4026ff78b..4436a39dac78df4d5f97d8b3cfd0eb0e9112863a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll @@ -3,6 +3,7 @@ ; RUN: 
--riscv-no-aliases < %s | FileCheck %s declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll index 4e7b12753aeb2fa1676c417a9c55d227b4f7e6ec..5f39548b45cc28c00dc71c4fee9fe851cb2f1cdc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vadd.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -46,6 +48,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , , , i32); @@ -58,6 +61,7 @@ define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vadd.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -90,6 +94,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , , , i32); @@ -102,6 +107,7 @@ define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vadd.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -134,6 +140,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , , , i32); @@ -146,6 +153,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -178,6 +186,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , , , i32); @@ -190,6 +199,7 @@ define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vadd.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -222,6 +232,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , , , i32); @@ -234,6 +245,7 @@ define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vadd.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -266,6 +278,7 @@ entry: } declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , , , i32); @@ -278,6 +291,7 @@ define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vadd.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -311,6 +325,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , , , i32); @@ -323,6 +338,7 @@ define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vadd.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -355,6 +371,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , , , i32); @@ -367,6 +384,7 @@ define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vadd.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -399,6 +417,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , , , i32); @@ -411,6 +430,7 @@ define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vadd.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -443,6 +463,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , , , i32); @@ -455,6 +476,7 @@ define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vadd.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -487,6 +509,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , , , i32); @@ -499,6 +522,7 @@ define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vadd.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -531,6 +555,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , , , i32); @@ -543,6 +568,7 @@ define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vadd.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -576,6 +602,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , , , i32); @@ -588,6 +615,7 @@ define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vadd.nxv1i32.nxv1i32( + undef, %0, 
%1, i32 %2) @@ -620,6 +648,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , , , i32); @@ -632,6 +661,7 @@ define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vadd.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -664,6 +694,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , , , i32); @@ -676,6 +707,7 @@ define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vadd.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -708,6 +740,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , , , i32); @@ -720,6 +753,7 @@ define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vadd.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -752,6 +786,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , , , i32); @@ -764,6 +799,7 @@ define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vadd.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -797,6 +833,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , , , i32); @@ -809,6 +846,7 @@ define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -841,6 +879,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , , , i32); @@ -853,6 +892,7 @@ define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vadd.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -885,6 +925,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , , , i32); @@ -897,6 +938,7 @@ define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vadd.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -929,6 +971,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , , , i32); @@ -941,6 +984,7 @@ define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vadd.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -974,6 +1018,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i8.i8( + , , i8, i32); @@ -986,6 +1031,7 @@ define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1018,6 +1064,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i8.i8( + , , i8, i32); @@ -1030,6 +1077,7 @@ define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1062,6 +1110,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i8.i8( + , , i8, i32); @@ -1074,6 +1123,7 @@ define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1106,6 +1156,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i8.i8( + , , i8, i32); @@ -1118,6 +1169,7 @@ define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1150,6 +1202,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i8.i8( + , , i8, i32); @@ -1162,6 +1215,7 @@ define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1194,6 +1248,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i8.i8( + , , i8, i32); @@ -1206,6 +1261,7 @@ define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1238,6 +1294,7 @@ entry: } declare @llvm.riscv.vadd.nxv64i8.i8( + , , i8, i32); @@ -1250,6 +1307,7 @@ define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1282,6 +1340,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i16.i16( + , , i16, i32); @@ -1294,6 +1353,7 @@ define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ 
-1326,6 +1386,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i16.i16( + , , i16, i32); @@ -1338,6 +1399,7 @@ define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1370,6 +1432,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i16.i16( + , , i16, i32); @@ -1382,6 +1445,7 @@ define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1414,6 +1478,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i16.i16( + , , i16, i32); @@ -1426,6 +1491,7 @@ define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1458,6 +1524,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i16.i16( + , , i16, i32); @@ -1470,6 +1537,7 @@ define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1502,6 +1570,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i16.i16( + , , i16, i32); @@ -1514,6 +1583,7 @@ define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1546,6 +1616,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i32.i32( + , , i32, i32); @@ -1558,6 +1629,7 @@ define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1590,6 +1662,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i32.i32( + , , i32, i32); @@ -1602,6 +1675,7 @@ define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vadd.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1634,6 +1708,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i32.i32( + , , i32, i32); @@ -1646,6 +1721,7 @@ define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vadd.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1678,6 +1754,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i32.i32( + , , i32, i32); @@ -1690,6 +1767,7 @@ define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vadd.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1722,6 +1800,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i32.i32( + , , i32, i32); @@ -1734,6 +1813,7 @@ define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vadd.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1766,6 +1846,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i64.i64( + , , i64, i32); @@ -1784,6 +1865,7 @@ define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vadd.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1822,6 +1904,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i64.i64( + , , i64, i32); @@ -1840,6 +1923,7 @@ define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vadd.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1878,6 +1962,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i64.i64( + , , i64, i32); @@ -1896,6 +1981,7 @@ define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vadd.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1934,6 +2020,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i64.i64( + , , i64, i32); @@ -1952,6 +2039,7 @@ define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vadd.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1997,6 +2085,7 @@ define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -2029,6 +2118,7 @@ define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -2061,6 +2151,7 @@ define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ -2093,6 +2184,7 @@ define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( % ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -2125,6 +2217,7 @@ define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -2157,6 +2250,7 @@ define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -2189,6 +2283,7 @@ define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( + undef, %0, i8 -9, i32 %1) @@ -2221,6 +2316,7 @@ define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -2253,6 +2349,7 @@ define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -2285,6 +2382,7 @@ define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -2317,6 +2415,7 @@ define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -2349,6 +2448,7 @@ define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -2381,6 +2481,7 @@ define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -2413,6 +2514,7 @@ define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2445,6 +2547,7 @@ define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vadd.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2477,6 +2580,7 @@ define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vadd.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2509,6 +2613,7 @@ define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vadd.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2541,6 +2646,7 @@ define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vadd.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2573,6 +2679,7 @@ define @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vadd.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2605,6 +2712,7 @@ define @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vadd.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2637,6 +2745,7 @@ define @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vadd.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2669,6 +2778,7 @@ define @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vadd.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll index a56bf1bf97e6e001a6f5388bd20b9fc033739e0a..5f1bd6842cc290265337972af17b812bc064d169 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, %0, %1, 
i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vadd.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vadd.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vadd.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vadd.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vadd.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vadd.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vadd.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vadd.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vadd.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vadd.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , , , i64); 
@@ -917,6 +958,7 @@ define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vadd.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vadd.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vadd.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vadd.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ 
define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vadd.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vadd.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vadd.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vadd.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vadd.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vadd.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vadd.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vadd.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vadd.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vadd.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vadd.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vadd.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vadd.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define 
[... identical updates for the remaining vadd.vi variants, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index 89b9dbcb57c0b3c8550586c6349c5da5d36d3987..85041080b20cb0f30f957f4e337158161559ac69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical updates for the remaining vand.vv variants, nxv2i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
[... identical updates for the remaining vand.vx variants; the i64-scalar forms on riscv32 keep their longer stack/splat hunks, e.g. @@ -1824,6 +1905,7 @@ for nxv1i64 ...]
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
[... identical updates for the remaining vand.vi variants, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index 39a1d9bc4a0815fab6c770d326a7aa5512a09554..2dfd3332001b25bbabf3a8968a11e6b669bd92d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical updates for every remaining vand.vv, vand.vx, and vand.vi variant in this file, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index d1cfc9704c1e75d0b7737bf4d9ae6fc3b4537bf3..fb1f8927d85b46d0418d14a7ff6cd74b81de8b19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical updates for every remaining vasub.vv and vasub.vx variant in this file; the i64-scalar vx forms on riscv32 keep their longer stack/splat hunks ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index 6ad70daafb25d8b5825aae078e0ffe16babeef71..595c05baa7a0f503b5f49b7ba35a0293d8c548bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical updates for every remaining vasub.vv and vasub.vx variant in this file, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index 4707aff206d82c6e3f58ceb9ae3b1629db2522a4..1845da27c7b490d33f568cd4009e64cd3732242d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical updates for every remaining vasubu.vv and vasubu.vx variant in this file, up to the final nxv8i64 vx form below ...]
@intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vasubu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll index 74d4690a8c8db32063a1bb1f78b952218251545b..7b9824e5592a8d4e1aabdff715d287718b7d69f4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vasubu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vasubu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vasubu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vasubu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vasubu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vasubu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vasubu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vasubu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vasubu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vasubu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vasubu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vasubu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vasubu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vasubu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vasubu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vasubu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vasubu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ 
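; ----------------------------------------------------------------------------
; Editor's sketch of the matching call-site change: each unmasked test now
; passes undef for the new passthru, which keeps the old behavior where the
; result's inactive (tail) elements are unspecified. This is the riscv64
; copy, so the VL argument is i64; the riscv32 copy above is identical
; except for that type. The function name here is illustrative.
declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8> @sketch_vasubu_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,   ; passthru (new): undef = no merge source
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i8> %a
}
; ----------------------------------------------------------------------------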
-634,6 +662,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vasubu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vasubu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vasubu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vasubu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vasubu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vasubu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vasubu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vasubu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasubu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasubu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasubu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasubu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vasubu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vasubu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vasubu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vasubu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vasubu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define 
@intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vasubu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vasubu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vasubu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vasubu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vasubu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vasubu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vasubu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vasubu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vasubu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vasubu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vasubu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vasubu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vasubu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vasubu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vasubu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vasubu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vasubu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vasubu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vasubu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vasubu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vasubu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll index e0620693119562b6f998cfbdd56e770b8b816992..bfc267bfbb276c2f09ffad287bbd77a91613717a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v 
-verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdiv.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdiv.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdiv.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdiv.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdiv.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdiv.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdiv.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdiv.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdiv.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdiv.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdiv.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdiv.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdiv.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdiv.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdiv.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdiv.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define 
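; ----------------------------------------------------------------------------
; Editor's note: the ".vx" hunks below get exactly the same treatment as the
; ".vv" ones; only the second source is a scalar GPR value that the hardware
; splats. Illustrative shape for the first vdiv variant:
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
  <vscale x 1 x i8>,   ; passthru (new)
  <vscale x 1 x i8>,   ; vector dividend
  i8,                  ; scalar divisor
  i32);                ; vl
; ----------------------------------------------------------------------------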
@intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdiv.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdiv.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdiv.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdiv.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdiv.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdiv.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdiv.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdiv.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vdiv.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdiv.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdiv.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdiv.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdiv.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define 
@intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdiv.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdiv.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdiv.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdiv.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdiv.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdiv.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdiv.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdiv.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdiv.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdiv.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdiv.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdiv.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll index 01f1100045567a49feece51af85fe897f3de66b8..349052d118587d0c4fc3b144d622470165c2655f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdiv.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdiv.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdiv.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define 
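; ----------------------------------------------------------------------------
; Editor's sketch of one complete test from these files, reassembled
; (attributes assumed; the CHECK lines are omitted and are untouched by the
; visible hunks, since an undef passthru selects the same instruction as
; before):
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i8> %a
}
; ----------------------------------------------------------------------------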
@intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdiv.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdiv.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdiv.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdiv.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdiv.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdiv.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdiv.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdiv.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdiv.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdiv.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdiv.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdiv.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdiv.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdiv.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdiv.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdiv.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdiv.nxv2i64.nxv2i64( + undef, %0, %1, i64 
%2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdiv.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdiv.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdiv.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdiv.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vdiv.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdiv.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdiv.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdiv.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdiv.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdiv.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdiv.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vdiv.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdiv.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdiv.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 
@@ entry: } declare @llvm.riscv.vdiv.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdiv.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdiv.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdiv.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vdiv.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdiv.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vdiv.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdiv.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vdiv.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdiv.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vdiv.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdiv.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vdiv.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdiv.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll index b86b902962ea5e0c9e7e9a5a829c9fbf8a15dbdb..da9582452bbb13819d392868919c1908185d5f59 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdivu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdivu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdivu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdivu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdivu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdivu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdivu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) 
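; ----------------------------------------------------------------------------
; Editor's note: only the unmasked tests are touched by these hunks. The
; masked variants in the unchanged regions between hunks already carry a
; merge operand (plus a mask and a policy operand), so they gain nothing
; here. Assumed shape, for contrast:
declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; merge (present before this patch)
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,   ; mask
  i32,                 ; vl
  i32);                ; policy
; ----------------------------------------------------------------------------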
@@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdivu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdivu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdivu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdivu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdivu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdivu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdivu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdivu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdivu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdivu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdivu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdivu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdivu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdivu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdivu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 
@@ entry: } declare @llvm.riscv.vdivu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdivu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdivu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vdivu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdivu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdivu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdivu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } 
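; ----------------------------------------------------------------------------
; Editor's note: in the i64-element ".vx" hunks just below, the scalar
; divisor is i64 but this is the riscv32 file, so the scalar arrives split
; across two GPRs; the larger hunk offsets there (e.g. -1824,6 +1905,7)
; reflect the extra CHECK lines that splat it, typically a pair of stack
; stores followed by a strided vector load. The IR shape is unchanged:
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
  <vscale x 1 x i64>,   ; passthru (new)
  <vscale x 1 x i64>,
  i64,                  ; 64-bit scalar on a 32-bit target
  i32);                 ; vl stays XLEN-sized (i32 here)
; ----------------------------------------------------------------------------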
declare @llvm.riscv.vdivu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll index 7efea7cfe3fbf4b7f2537e6b70fbafef45ee4770..d5355e87a3c1cef48518a44dd8d18ae51a9a6318 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdivu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdivu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdivu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdivu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdivu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdivu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdivu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdivu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdivu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdivu.nxv4i16.nxv4i16( + undef, 
%0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdivu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdivu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdivu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdivu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdivu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdivu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdivu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdivu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdivu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdivu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdivu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdivu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vdivu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdivu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdivu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vdivu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdivu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdivu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdivu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vdivu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vdivu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vdivu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vdivu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 
+1980,7 @@ entry: } declare @llvm.riscv.vdivu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vdivu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll index 041580b2b49d8abee295f7019d27e892aac9cc44..817c2514909f1dbb6fb453d02571edce33be9301 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ entry: } declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfadd.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfadd.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfadd.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ entry: } declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfadd.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfadd.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfadd.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfadd.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfadd.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfadd.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ entry: } declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfadd.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ entry: } declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ entry: } declare 
@llvm.riscv.vfadd.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfadd.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ entry: } declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfadd.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfadd.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ entry: } declare @llvm.riscv.vfadd.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfadd.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ entry: } declare @llvm.riscv.vfadd.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfadd.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ entry: } declare @llvm.riscv.vfadd.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfadd.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfadd.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ entry: } declare @llvm.riscv.vfadd.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfadd.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ entry: } declare @llvm.riscv.vfadd.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfadd.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ entry: } declare @llvm.riscv.vfadd.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfadd.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ entry: } declare @llvm.riscv.vfadd.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfadd.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ entry: } declare @llvm.riscv.vfadd.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfadd.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfadd.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ entry: } declare @llvm.riscv.vfadd.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfadd.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ entry: } declare @llvm.riscv.vfadd.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfadd.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ entry: } declare @llvm.riscv.vfadd.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfadd.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ 
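; ----------------------------------------------------------------------------
; Editor's note: unlike the per-target integer files above, vfadd.ll is one
; source shared by both targets. Its RUN lines pipe the file through sed to
; rewrite the iXLen placeholder (the visible line substitutes i64 for
; riscv64; a riscv32/i32 run presumably sits just above it in the file).
; The new passthru follows the same pattern, sketched with the placeholder
; kept:
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,   ; passthru (new)
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);                ; rewritten to i32/i64 before llc runs
; ----------------------------------------------------------------------------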
entry: } declare @llvm.riscv.vfadd.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfadd.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ entry: } declare @llvm.riscv.vfadd.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfadd.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 0145f2ad764ecadc93a5dbba6646c407edcad119..c0e4b5e49b6a5a044d383503eecc3be02e6dbc6f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfdiv.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfdiv.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfdiv.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfdiv.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfdiv.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfdiv.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfdiv.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfdiv.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfdiv.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfdiv.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ entry: } declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfdiv.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ entry: } declare 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
index 446981928b6cf6436b557c3eb5ba04bafeee2513..cb9c3cd0706ce538a8325bbacd4edefbce4d55f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfmax tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
index e151e9fb695de19cbd2a35dc1e6472f0f86acd0e..4d6da26a75cf7ecc306e3213042b4522abfe5cd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfmin tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
index 0f4c738025617b4375ef316480d4d461ef6210d5..2301057994d7e5857cc25feb5f2eda07c590419e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfmul tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
index 58dc39b995055ef3a129279b488aad452c01de09..a872aad5bf0f1ce8ff38077145013368d8ea4da2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfrdiv tests in this file: the vf form over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
index 3fb281562088c8e5693c5321e661e502fe7136a3..d1cabf11698e0f00b7c30eb4e6e96886cff5c8c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfrsub tests in this file: the vf form over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
index 65a5592775cb5677450c490e4369c5175e0ede63..568d0e83a635d9d70c1b0d9ce8aa2d02a3f3d342 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfsgnj tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
index f16c8a6db1fb9b97bb64a6c5007c82433ef16b3e..4a1171b0ffd3e720991702af8308be3d28773faf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfsgnjn tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
index edfd578ce8aa016e31fceb9a6953d065b450d69c..cd1f2d8d6b742295b08e2749f1b0814fad330223 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfsgnjx tests in this file: vv and vf forms over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
index 6cbba483a8d9f4269690ce0b8bf5d29d5ec6bc47..2bcf3cfb6b68f19010073c4d5ea28e4e05964cb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
     iXLen %2)
[the same two insertions repeat for the remaining vfslide1down tests in this file: the vf form over nxv1f16-nxv32f16, nxv1f32-nxv16f32, and nxv1f64-nxv8f64]
@@ -331,6 +346,7 @@ define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfslide1down.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -364,6 +380,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv4f32.f32( + , , float, iXLen); @@ -376,6 +393,7 @@ define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfslide1down.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -409,6 +427,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv8f32.f32( + , , float, iXLen); @@ -421,6 +440,7 @@ define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfslide1down.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -454,6 +474,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv16f32.f32( + , , float, iXLen); @@ -466,6 +487,7 @@ define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfslide1down.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -499,6 +521,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv1f64.f64( + , , double, iXLen); @@ -511,6 +534,7 @@ define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfslide1down.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -544,6 +568,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv2f64.f64( + , , double, iXLen); @@ -556,6 +581,7 @@ define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfslide1down.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv4f64.f64( + , , double, iXLen); @@ -601,6 +628,7 @@ define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfslide1down.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vfslide1down.nxv8f64.f64( + , , double, iXLen); @@ -646,6 +675,7 @@ define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfslide1down.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll index 695cf7aab3f6cf0b6ea5f84a17aae1b9bee7899c..b70fdec7716d4718427b747844cd102d96a710be 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , , half, iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfslide1up.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -50,6 +52,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv2f16.f16( + , , half, iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfslide1up.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -96,6 +100,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv4f16.f16( + , , half, iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfslide1up.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -142,6 +148,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv8f16.f16( + , , half, iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfslide1up.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -188,6 +196,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv16f16.f16( + , , half, iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfslide1up.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -234,6 +244,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv32f16.f16( + , , half, iXLen); @@ -247,6 +258,7 @@ define 
@intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfslide1up.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -280,6 +292,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv1f32.f32( + , , float, iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfslide1up.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -326,6 +340,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv2f32.f32( + , , float, iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfslide1up.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -372,6 +388,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv4f32.f32( + , , float, iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfslide1up.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -418,6 +436,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv8f32.f32( + , , float, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfslide1up.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -464,6 +484,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv16f32.f32( + , , float, iXLen); @@ -477,6 +498,7 @@ define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfslide1up.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -510,6 +532,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv1f64.f64( + , , double, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfslide1up.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -556,6 +580,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv2f64.f64( + , , double, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfslide1up.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -602,6 +628,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv4f64.f64( + , , double, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfslide1up.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -648,6 +676,7 @@ entry: } declare @llvm.riscv.vfslide1up.nxv8f64.f64( + , , double, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfslide1up.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll index 645fb340ffa3098c2d9767f23e4c155e03c7e0b0..e627b8f8b69e994ea8872f5dba2a428fb3c1e927 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfsub.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfsub.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfsub.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfsub.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ entry: } 
declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfsub.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfsub.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfsub.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfsub.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfsub.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfsub.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ entry: } declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfsub.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ entry: } declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfsub.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfsub.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfsub.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfsub.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ entry: } declare @llvm.riscv.vfsub.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfsub.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfsub.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfsub.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfsub.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ entry: } declare @llvm.riscv.vfsub.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfsub.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ 
entry: } declare @llvm.riscv.vfsub.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfsub.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ entry: } declare @llvm.riscv.vfsub.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfsub.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfsub.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfsub.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfsub.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ entry: } declare @llvm.riscv.vfsub.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfsub.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ entry: } declare @llvm.riscv.vfsub.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfsub.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ entry: } declare @llvm.riscv.vfsub.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfsub.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ entry: } declare @llvm.riscv.vfsub.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfsub.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ entry: } declare @llvm.riscv.vfsub.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfsub.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index 541f2b8564f280c331fa5b303777423133dfe689..5910e790dec85e0a4f365aa21230790f3cf7db5a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + , , , iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( + , , , iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( + , , , iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -142,6 +148,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( + , , , iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( 
@llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -188,6 +196,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( + , , , iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -234,6 +244,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + , , , iXLen); @@ -247,6 +258,7 @@ define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -280,6 +292,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + , , , iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -326,6 +340,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + , , , iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -372,6 +388,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + , , , iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -418,6 +436,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( + , , half, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -464,6 +484,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( + , , half, iXLen); @@ -477,6 +498,7 @@ define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -510,6 +532,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( + , , half, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -556,6 +580,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( + , , half, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -602,6 +628,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( + , , half, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -648,6 +676,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + , , float, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -694,6 +724,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + , , float, iXLen); @@ -707,6 +738,7 @@ define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -740,6 +772,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + , , float, iXLen); @@ -753,6 +786,7 @@ define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -786,6 +820,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + , , float, iXLen); @@ -799,6 +834,7 @@ define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + undef, %0, float %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll index 
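For readers skimming the mechanical churn above, here is a minimal sketch of what the new operand enables once it exists. The function name `@sketch` and the use of a live `%prev` passthru are illustrative, not taken from these tests (which only ever pass undef); my reading is that a non-undef passthru supplies the elements not written by the operation, while undef reproduces the old unmasked behavior.

```llvm
; Hypothetical usage sketch: the first operand is the new passthru.
; These tests always pass undef; passing %prev instead is assumed to
; let tail elements inherit from it.
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,   ; passthru (new)
  <vscale x 1 x half>,   ; lhs
  <vscale x 1 x half>,   ; rhs
  i64)                   ; vl (iXLen instantiated as i64)

define <vscale x 1 x half> @sketch(<vscale x 1 x half> %prev,
                                   <vscale x 1 x half> %x,
                                   <vscale x 1 x half> %y, i64 %vl) {
  %r = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
      <vscale x 1 x half> %prev,
      <vscale x 1 x half> %x,
      <vscale x 1 x half> %y,
      i64 %vl)
  ret <vscale x 1 x half> %r
}
```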
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
index 28cdfbf621b3d513acd73afd0689fead60cd9499..1027e08ffce3341a13c4e12d5e264f774af9cf64 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The same update repeats for every vfwadd.w_wv and vfwadd.w_wf variant, and for the *_untie tests at the end of the file, whose calls pass undef followed by %1, %0.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
index b1ec8464047e1e707d8f3d7d3267751403f90e7b..017a9c65f034d6171387621b7dc09a15ddfca7be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[All remaining vfwmul.vv and vfwmul.vf variants, nxv2f32 through nxv8f64, receive the same pair of lines.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
index 916abcae0de0dacc2193d13aa477be822dba3aba..c7ed2108f4d5872d060ba25b69a2facda7fbd4e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[All remaining vfwsub.vv and vfwsub.vf variants, nxv2f32 through nxv8f64, receive the same pair of lines.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
index b5d008c3e1ed5881e98e0b4852db3e996f911dc8..1b20551ceb9c97d5971f2c4ac3f2085e766a2ebf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The same update repeats for every vfwsub.w_wv and vfwsub.w_wf variant, and for the *_untie tests at the end of the file, whose calls pass undef followed by %1, %0.]
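One detail of the `.w` (wide-operand) files above is easy to misread in the flattened hunks: the new passthru and the first source operand share the wide result type, while the second source stays narrow. A minimal sketch of that shape, following the convention shown in the declares above (the trailing `i64` stands in for the `iXLen` placeholder on rv64):

```llvm
; Shape of the .w form after this change: result, passthru, and the
; first source are the wide type; the second source is the narrow type.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,  ; passthru (new, wide result type)
  <vscale x 1 x float>,  ; wide source
  <vscale x 1 x half>,   ; narrow source
  i64)                   ; vl
```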
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
index 1d51eb6549cff9ab8ac1d22afc3659c3b06237b4..2f431ab0195c87e3824c01a9741e4275ad66f38e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[All vmax.vv and vmax.vx variants, nxv1i8 through nxv8i64, receive the same pair of lines; on rv32 this includes the vx tests whose scalar operand is i64 while vl stays i32.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
index da40c60ff8a9614e8dcb7b1d5513db491a6e74fe..49f63fab0555c30112218baa8e57a891de041c09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[The rv64 file mirrors the rv32 one with an i64 vl; every vmax.vv and vmax.vx variant receives the same declare/call update.]
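The rv32 integer files also carry `vx` tests whose scalar operand is i64 even though XLEN (and thus vl) is i32; in the existing CHECK lines those scalars are splatted through memory rather than held in a register. The passthru change is type-agnostic, so those hunks look the same. A sketch of that corner, with the hypothetical function name `@sketch_rv32` as my own illustration:

```llvm
; rv32 sketch: the scalar source stays i64 while vl is i32 (XLEN).
; The new passthru slot is still just the result vector type.
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
  <vscale x 1 x i64>,  ; passthru (new)
  <vscale x 1 x i64>,  ; vector source
  i64,                 ; scalar source
  i32)                 ; vl

define <vscale x 1 x i64> @sketch_rv32(<vscale x 1 x i64> %v, i64 %s, i32 %vl) {
  %r = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
      <vscale x 1 x i64> undef, <vscale x 1 x i64> %v, i64 %s, i32 %vl)
  ret <vscale x 1 x i64> %r
}
```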
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
index af080efa3358be14e49d8f9e27847f3b50518c4f..9523f1980ceb622939e1c719f1698e91f62c86da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[The vmaxu.vv and vmaxu.vx hunks continue in the same pattern through the rest of the file.]
-1638,6 +1711,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll index bc63daea21f6958fecd41ee308a570e3401a39d6..caaeaf6ddfaefa1abd5b12e85fd23fe3611007c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare 
@llvm.riscv.vmaxu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare 
@llvm.riscv.vmaxu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmaxu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmaxu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmaxu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmaxu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmaxu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmaxu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmaxu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmaxu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmaxu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmaxu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare 
@llvm.riscv.vmaxu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vmaxu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll index ebc304b18f760800a4ecce0550262e3c5243c782..54c246341525483c1edbdd30e0cf44934a664543 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vmin.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmin.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: 
} declare @llvm.riscv.vmin.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 
+1228,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vmin.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define 
@intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll index febe87403cdda266dc57d7772a0a8ab25213158b..4cc67d660fdb988b12038b839bb484360d4e7f64 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vmin.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmin.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ 
define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vmin.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define 
@intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vmin.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vmin.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vmin.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vmin.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vmin.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vmin.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll index ec3cc011bd6e16d7a49e3cb07afaf5add97029b8..5f024c9b8a5f54b27cdc15e65852c8cac38abc65 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , , , 
i32); @@ -14,6 +15,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vminu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( 
@llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vminu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define 
@intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll index f3b9100c38a620d16d49851dbde80529adfc46f9..93881f6cfd54b8d13e2a2f7b0e9ee5b4952bd5fb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare 
@llvm.riscv.vminu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vminu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i64.nxv2i64( + , , , i64); @@ 
-872,6 +911,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vminu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vminu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i32.i32( + 
, , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vminu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vminu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vminu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vminu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vminu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll index cf0c1da69743c91010839f86f4a13fd608849c48..6727bb585e2a757d6b65d972e00f494682cb673d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmul.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vmul.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmul.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vmul.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmul.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vmul.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmul.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vmul.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmul.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmul.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmul.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
index adf746657527dc293dc24ee9d65a40ddb2e17d78..28c2d775e1bc7d4d2271d0e3a63f7be0a0f9d5d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru/undef updates for every remaining vmul vv and vx variant in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
index 51e254f5fb3526678a6edecae95d43729b1af67d..4bd0f8369920327866ff0e5c7c1c60826aaccc1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru/undef updates for every remaining vmulh vv and vx variant in this file ...]
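The vx variants get the same treatment; only the scalar operand differs. A sketch of one complete updated test, including its FileCheck block. The vsetvli operands and register assignments shown are typical of the autogenerated checks for this configuration; treat them as illustrative rather than exact:

define <vscale x 1 x i8> @sketch_vmulh_vx(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: sketch_vmulh_vx:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  ; Only the IR changes: the undef passthru adds one operand, and the
  ; expected machine code (the CHECK lines) stays the same.
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i8, i32)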
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
index 9242b6220237d7d1fe01cc5c4daa63365462a9a1..2fd93845e4be24b6eeac6613e84342be0b1fb4db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru/undef updates for every remaining vmulh vv and vx variant in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index 068a53f994fe1a0b7607e53e6ea946f1a57eaceb..28afafa2e0a38819291a25d099ddb45627f2f3f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru/undef updates for every remaining vmulhsu vv and vx variant in this file ...]
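One wrinkle in the rv32 files: the i64-scalar vx hunks (for example the @@ -1824,6 +1905,7 @@ ranges above) shift by larger offsets than their rv64 counterparts, which is consistent with riscv32 lacking a 64-bit GPR, so those test bodies are longer and the scalar has to be broadcast into a vector first. The edit itself is still just the added undef. A sketch of that call shape (names illustrative):

; On riscv32 the scalar operand is a true i64 while vl stays i32; the
; backend must first materialize the 64-bit scalar as a vector value.
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
  <vscale x 1 x i64>,  ; passthru (new leading operand)
  <vscale x 1 x i64>,
  i64,
  i32)

define <vscale x 1 x i64> @sketch_vmulhsu_vx_i64(<vscale x 1 x i64> %v, i64 %s, i32 %vl) {
entry:
  %r = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %v,
    i64 %s,
    i32 %vl)
  ret <vscale x 1 x i64> %r
}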
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
index 328c9098fb4d0e771679e3e65a8291bd9490f674..fd7f4a120ccff1538d67d21d455d0a4db33f0541 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru/undef updates for every remaining vmulhsu vv and vx variant in this file ...]
i64); @@ -194,6 +203,7 @@ define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } 
declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vmulhsu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vmulhsu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vmulhsu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vmulhsu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmulhsu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmulhsu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmulhsu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmulhsu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmulhsu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmulhsu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmulhsu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmulhsu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmulhsu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmulhsu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare 
@llvm.riscv.vmulhsu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmulhsu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmulhsu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmulhsu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmulhsu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmulhsu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmulhsu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmulhsu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vmulhsu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmulhsu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll index 8c04d78a906029f0c997353d1245b5c39ae152aa..a20149f9b23d49baa52d961ace91040a91e9d1e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 48ce2b21aacb66640e4d5df672cc0f2efa362a9c..b3b735281097155575c3813bf9aed82209f1495d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
[...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index 3dfadcc3f6c46cda17bc2c6ec9e97a44767c0878..9452abe1711845b553c4438fba534be61b53cd88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...]
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
[...]
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
[...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index dc0f3f879e370d1993f98d41793bf6b746d0d43e..df8def1426f022120a7da3039a9c31632d86d1cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...]
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64,
   i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 %1,
     i64 %2)
[...]
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 9,
     i64 %1)
[...]
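Because the narrowing clips have three distinct operand kinds (wv, vx, vi), the operand roles are easy to lose in the churn above. A small sketch under the same assumptions, rv32 flavour so the xlen scalars are i32; the wrapper function name is invented:

; Sketch only: operand roles for the updated unmasked vnclip.
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,   ; passthru, the new first operand (undef = tail agnostic)
  <vscale x 1 x i16>,  ; 2*SEW source to be clipped down to SEW
  i32,                 ; scalar shift amount; a constant here gives the vi form
  i32);                ; vl

define <vscale x 1 x i8> @example_vnclip(<vscale x 1 x i16> %wide, i32 %vl) {
entry:
  %narrow = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef, <vscale x 1 x i16> %wide, i32 9, i32 %vl)
  ret <vscale x 1 x i8> %narrow
}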
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index 1fb5c651ebe0fc1002173d6663a05c60dd8eeea4..7d368f9a0ca2a83003b9133342aa898253a8d6fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...]
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
[...]
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
[...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index 0fd55a75c5cf599438b5f0c1ac3117401b37a9cd..4a41fe987c2d17e80f50281400b11663b0bc26c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...]
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64,
   i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 %1,
     i64 %2)
[...]
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 9,
     i64 %1)
[...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 123dfbe13d77a6dd18735a63bed0f1225d4ba42c..e29de52f45ab507de60b6ed477dca25d41765242 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...]
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
[...]
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
[...]
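The only difference between the rv32 and rv64 copies of these tests is the xlen-sized scalar type: the shift amount and vl are i32 on riscv32 and i64 on riscv64. A sketch of the rv64 counterpart of the vnsra vx declaration above, mirrored from the i64 scalars in the rv64 hunks and labelled as an assumption:

; Sketch only: rv64 flavour of the same updated signature.
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,   ; passthru (new in this patch)
  <vscale x 1 x i16>,  ; 2*SEW source
  i64,                 ; shift amount: xlen-sized, so i64 on rv64
  i64);                ; vl, likewise xlen-sized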
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index b6ad65065adbdbc3a98336a0febe0fb1a784e92e..c9686b059b9ccdead0917a5a9fbc7df0b176b156 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...]
@@ -183,6 +191,7 @@ entry:
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i8>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i64);
@@ -196,6 +205,7 @@ define
@intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + , , , i64); @@ -287,6 +300,7 @@ define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -548,6 +572,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + , , , i64); @@ -561,6 +586,7 @@ define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -594,6 +620,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + , , , i64); @@ -607,6 +634,7 @@ define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -640,6 +668,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + , , , i64); @@ -653,6 +682,7 @@ define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -686,6 +716,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( + , , i64, i64); @@ -698,6 +729,7 @@ define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -731,6 +763,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( + , , i64, i64); @@ -743,6 +776,7 @@ define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -776,6 +810,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( + , , i64, i64); @@ -788,6 +823,7 @@ define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -821,6 +857,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( + , , i64, i64); @@ -834,6 +871,7 @@ define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -867,6 +905,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( + , , i64, 
i64); @@ -880,6 +919,7 @@ define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -913,6 +953,7 @@ entry: } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( + , , i64, i64); @@ -926,6 +967,7 @@ define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -959,6 +1001,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( + , , i64, i64); @@ -971,6 +1014,7 @@ define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1004,6 +1048,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( + , , i64, i64); @@ -1016,6 +1061,7 @@ define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1049,6 +1095,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( + , , i64, i64); @@ -1062,6 +1109,7 @@ define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1095,6 +1143,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( + , , i64, i64); @@ -1108,6 +1157,7 @@ define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1141,6 +1191,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( + , , i64, i64); @@ -1154,6 +1205,7 @@ define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1187,6 +1239,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( + , , i64, i64); @@ -1199,6 +1252,7 @@ define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1232,6 +1286,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( + , , i64, i64); @@ -1245,6 +1300,7 @@ define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1278,6 +1334,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( + , , i64, i64); @@ -1291,6 +1348,7 @@ define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1324,6 +1382,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( + , , i64, i64); @@ -1337,6 +1396,7 @@ define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1377,6 +1437,7 @@ define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -1409,6 +1470,7 @@ define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -1441,6 +1503,7 @@ define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -1474,6 +1537,7 @@ define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -1507,6 +1571,7 @@ define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -1540,6 +1605,7 @@ define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -1572,6 +1638,7 @@ define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i64 9, i64 %1) @@ 
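The test churn above is mechanical, so a condensed sketch may be easier to review than the hunks themselves: every unmasked intrinsic in these files gains a passthru vector as operand 0, and the regenerated tests pass undef there to keep the previous don't-care result semantics. A minimal before/after pair for one vnsra width is shown below; the value names %w, %shift and %vl are illustrative, not taken from the tests.

  ; Before this patch: three operands (wide source, shift amount, VL).
  %old = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
           <vscale x 1 x i16> %w, <vscale x 1 x i8> %shift, i32 %vl)

  ; After: operand 0 is the passthru. undef reproduces the old behaviour;
  ; the new operand is presumably there so that a live vector can be merged
  ; into the result tail instead.
  %new = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
           <vscale x 1 x i8> undef, <vscale x 1 x i16> %w,
           <vscale x 1 x i8> %shift, i32 %vl)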
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index 065203a3fa0d436a07f7891f1c7ff537b83ca69e..1d8338610a19a8ddd28b3ee03e9976d617827e35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[…wv, vx (from @@ -698,6 +729,7) and vi (from @@ -1377,6 +1437,7) hunks repeat for every vnsrl element type, exactly as in vnsra-rv32.ll…]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index 8f8a89a85e821665f4173bd393e3b2899635b3d4..942e9cc7670fc8df5da6988d8790157bfa2cc374 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[…and so on for every vnsrl width and form, with i64 scalar and VL operands…]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index ab10d1ce9c3ade1222b8237b73b855549e66b46d..3068585d78c70728511df64f4d13f6484620093b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[…vv hunks repeat for nxv2i8–nxv64i8, nxv1i16–nxv32i16, nxv1i32–nxv16i32 and nxv1i64–nxv8i64…]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
[…vx hunks repeat for the remaining element types, including the i64-scalar cases (e.g. @@ -1824,6 +1905,7), which sit at larger offsets in this rv32 file…]
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
[…vi hunks repeat for the remaining element types…]
@llvm.riscv.vor.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vor.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vor.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vor.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vor.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vor.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vor.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vor.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll index d30bf628ad030048d0113921ca0810ef4b72a1bf..57be344004e287d8c0a90690f76bcdfd13c0ddfb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vor.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vor.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vor.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vor.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vor.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vor.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vor.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vor.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vor.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vor.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vor.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vor.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vor.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vor.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vor.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vor.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vor.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vor.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vor.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vor.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vor.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( 
@llvm.riscv.vor.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vor.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vor.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vor.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vor.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vor.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vor.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vor.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vor.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vor.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vor.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vor.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vor.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vor.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vor.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vor.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vor.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vor.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vor.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vor.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vor.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vor.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vor.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vor.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vor.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vor.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vor.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vor.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vor.nxv16i8.i8( + 
undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vor.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vor.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vor.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vor.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vor.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vor.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vor.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vor.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vor.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vor.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vor.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vor.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vor.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vor.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vor.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vor.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vor.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vor.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vor.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vor.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vor.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vor.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vor.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vor.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vor.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vor.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vor.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vor.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vor.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vor.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vor.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vor.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vor.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define 
@intrinsic_vor_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vor.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vor.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vor.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vor_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vor.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vor.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vor_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vor.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vor.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vor.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vor.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vor.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vor.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vor.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vor.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vor.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vor.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vor.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vor.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vor.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vor.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll index e9a24bcee7d69424ad3504fdec8560e6ca072d62..54b8d8808b7c07f8438f757b4fd2f0b8f772f02f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrem.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vrem.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
index e9a24bcee7d69424ad3504fdec8560e6ca072d62..54b8d8808b7c07f8438f757b4fd2f0b8f772f02f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

 [the same passthru additions repeat for the remaining vrem.vv element types (nxv2i8 through nxv8i64) and for every vrem.vx test, whose declarations take a scalar i8-i64 second source instead of a vector]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
index 100b79bde6258f1d105d844798fe143e17d87c95..4c894190474a079fa74759f34559d4cd33069766 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

 [the same passthru additions repeat for the remaining vrem.vv element types (nxv2i8 through nxv8i64) and for every vrem.vx test; this file is the riscv64 counterpart of the one above, so its intrinsics end in an i64 AVL operand]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
index 6b3cf9df77ca6b2ade836cb56325696cfca06108..1e5cdb83f3d83dfc07681827dcbb7b4ce7e4ea4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

 [the same passthru additions repeat for the remaining vremu.vv element types (nxv2i8 through nxv8i64) and for every vremu.vx test, whose declarations take a scalar i8-i64 second source instead of a vector]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
index 9b5c03b51f0b454390c9c38f39de24ce02b4583c..543868cfbf155f4013132d1006c1fae5fec579fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

 [the same passthru additions repeat for the remaining vremu.vv element types (nxv2i8 through nxv8i64) and for every vremu.vx test; as the riscv64 counterpart of the file above, its intrinsics end in an i64 AVL operand]
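Before the vrgather files, one more orientation sketch (not from the patch; @urem_vv is an illustrative name) showing the vector-vector form of vremu after the change; the riscv32 file declares the same intrinsics with a trailing i32 AVL instead of i64:

declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,   ; new passthru operand
  <vscale x 4 x i16>,   ; dividend vector
  <vscale x 4 x i16>,   ; divisor vector
  i64)                  ; AVL

define <vscale x 4 x i16> @urem_vv(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i64 %vl) {
entry:
  %r = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
      <vscale x 4 x i16> undef,
      <vscale x 4 x i16> %a,
      <vscale x 4 x i16> %b,
      i64 %vl)
  ret <vscale x 4 x i16> %r
}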
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index 75e05a90a06cbdf840c14785fa67f186bf701341..e529362ceab55f3ea91c2b829c461634fa2f0171 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

 [the same passthru additions repeat for every vrgather.vv test (integer nxv1i8-nxv16i32 and floating-point nxv1f16-nxv8f64), for every vrgather.vx test, whose declarations end in an i32 index and i32 AVL, and for every vrgather.vi test, which reuses the vrgather.vx intrinsics with the immediate index 9]
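Because vrgather takes a data vector plus an index vector (or a scalar/immediate index), the new passthru becomes the first of four operands. A standalone sketch (not from the patch; @gather is an illustrative name), using the riscv32 float variant declared above:

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
  <vscale x 1 x float>,   ; new passthru operand (undef in the tests)
  <vscale x 1 x float>,   ; data
  <vscale x 1 x i32>,     ; indices
  i32)                    ; AVL

define <vscale x 1 x float> @gather(<vscale x 1 x float> %v, <vscale x 1 x i32> %idx, i32 %vl) {
entry:
  %r = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
      <vscale x 1 x float> undef,
      <vscale x 1 x float> %v,
      <vscale x 1 x i32> %idx,
      i32 %vl)
  ret <vscale x 1 x float> %r
}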
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index a15e7ba69e3853a932871ee84550f1794b4dfc92..3a9ab1e28cd45a3f1439fd56a66ea66b92b665d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -1711,6 +1785,7 @@ entry:
 }
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
   i64);
@@ -1724,6 +1799,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 %1,
     i64 %2)
@@ -3421,6 +3569,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 9,
     i64 %1)
@@ -4609,6 +4793,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
+    <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     i64 9,
     i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
index 720d6d8585fdd378b6c4411505da9338757b07ed..522c617097425f927e881716be850b0bbfbb0caf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -1354,6 +1413,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+    <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
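vrgatherei16 follows the same convention with a 16-bit index vector whose element count matches the data vector. This sketch assumes the nxv4i32 form exercised in the file above; the @gather_ei16 wrapper and the <vscale x 4 x i16> index type are illustrative assumptions, not lines taken from the patch.

declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
  <vscale x 4 x i32>,   ; passthru (new leading operand)
  <vscale x 4 x i32>,   ; source vector
  <vscale x 4 x i16>,   ; 16-bit index vector
  i32)                  ; vl

define <vscale x 4 x i32> @gather_ei16(<vscale x 4 x i32> %v, <vscale x 4 x i16> %idx, i32 %vl) {
entry:
  ; Same unmasked convention: undef occupies the new passthru slot.
  %r = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %v,
    <vscale x 4 x i16> %idx,
    i32 %vl)
  ret <vscale x 4 x i32> %r
}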
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
index 9592630c19781b4646412c0e91d3b46bb0781e5c..c31e6e68bc7b406b045b094a589412edee6a2ea4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
@@ -1354,6 +1413,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+    <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     <vscale x 8 x i16> %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index bd00557309afdb86f773fb0bd0f0c942b0988718..422d326e248dab5ef2fe06957607e3e6067cc266 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1047,6 +1091,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
@@ -1719,6 +1784,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i32 %1)
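For vrsub the scalar operand is the minuend, and the vi tests above lower the immediate 9 through the same .vx intrinsic. A minimal sketch of the updated unmasked form, with @rsub_nine invented for illustration and the signature following the declarations above:

declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
  <vscale x 1 x i8>,   ; passthru (new leading operand)
  <vscale x 1 x i8>,   ; vector subtrahend
  i8,                  ; scalar minuend
  i32)                 ; vl

define <vscale x 1 x i8> @rsub_nine(<vscale x 1 x i8> %v, i32 %vl) {
entry:
  ; Computes 9 - element for each active lane; immediate forms reuse the .vx intrinsic.
  %r = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %v,
    i8 9,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}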
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
index fe930ea9dcce7b8640662cc79c61b093ab09f0ce..372a7cbd560da82a02f11409fb238115b5ce7042 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
@@ -999,6 +1043,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i64 %1)
@@ -1671,6 +1736,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
index 968fbf120aa3c319006740c6779c204c847c838c..2961c0db836c5fe2e5a2b9d79135efabddc54ab6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -996,6 +1040,7 @@ entry:
 }
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i32 %1)
@intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsadd.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2489,6 +2591,7 @@ define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsadd.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsadd.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsadd.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsadd.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsadd.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsadd.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsadd.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsadd.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll index b1cf6b22b0f97f429f7093d551a2adad1205bf9e..4042be68d6592b8dfb02915b01c5126333a3f7a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define 
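Every hunk in these test files follows the same mechanical pattern, so one test is worth reconstructing in full. The sketch below shows what an updated vsadd test reads like after the patch, with the scalable-vector types restored from the mangled intrinsic name (nxv16i8 corresponds to <vscale x 16 x i8>); the function name @vsadd_sketch is illustrative, not taken from the patch, and the comment on undef reflects the patch's apparent intent rather than documented semantics:

; Unmasked RVV intrinsics now take an explicit passthru as operand 0.
; Passing undef presumably preserves the old don't-care behaviour for
; the elements not produced by the operation.
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,   ; passthru (new in this patch)
  <vscale x 16 x i8>,   ; first source operand
  <vscale x 16 x i8>,   ; second source operand
  i32)                  ; vl

define <vscale x 16 x i8> @vsadd_sketch(<vscale x 16 x i8> %x,
                                        <vscale x 16 x i8> %y, i32 %vl) {
entry:
  %r = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
      <vscale x 16 x i8> undef,
      <vscale x 16 x i8> %x,
      <vscale x 16 x i8> %y,
      i32 %vl)
  ret <vscale x 16 x i8> %r
}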
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
index b1cf6b22b0f97f429f7093d551a2adad1205bf9e..4042be68d6592b8dfb02915b01c5126333a3f7a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... the rv64 file mirrors the rv32 file hunk for hunk, with i64 in place of i32 as the vl operand type: the vsadd.vv, vsadd.vx, and vsadd.vi tests for every type from nxv1i8 through nxv8i64 each gain the passthru parameter in the declare and an undef first argument in the call. ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
index c57a590eb388891e9c4d83569563ef1a3d85a68a..9c8492f816d368305d98481d24943c8534a5b5e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... as in the vsadd files, every vsaddu.vv, vsaddu.vx, and vsaddu.vi test from nxv1i8 through nxv8i64 gains the passthru parameter in its declare and a leading undef argument in its call. ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
index 991d2302377571d413487d93f453a7842fbaf4b7..10efea7664219dcdaa8bf651ee80c17ad54af6e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... the rv64 vsaddu file mirrors the rv32 file with i64 vl operands: every vsaddu.vv, vsaddu.vx, and vsaddu.vi test from nxv1i8 through nxv8i64 receives the same pair of changes. ...]
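For context on why these tests pass undef rather than a live value: the new first operand is a merge/passthru, so a non-undef argument would tie the result's undisturbed elements to it. A hypothetical tail-undisturbed use would look like the sketch below; the function name @vfadd_tu_sketch and the assumption that a live passthru is already honoured at this point in the patch series are both illustrative, not established by the diff:

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,   ; passthru / merge value
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64)

define <vscale x 1 x double> @vfadd_tu_sketch(<vscale x 1 x double> %merge,
                                              <vscale x 1 x double> %a,
                                              <vscale x 1 x double> %b,
                                              i64 %vl) {
entry:
  ; Elements beyond %vl would be taken from %merge instead of being
  ; left undefined, assuming the backend honours a non-undef passthru.
  %r = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
      <vscale x 1 x double> %merge,
      <vscale x 1 x double> %a,
      <vscale x 1 x double> %b,
      i64 %vl)
  ret <vscale x 1 x double> %r
}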
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 5c0eb6be8ddaf5085846e174e23293c0f99442f0..5d0c898e43691a4a80a9b516e057923f1c5f05cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -7,12 +7,12 @@
 declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
 
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64)
 
-declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
 
-declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
 
 declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double, i64)
 declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64)
@@ -37,11 +37,11 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
   br label %if.end
[... the remaining hunks in this file make the same two changes throughout: every tail call to vfadd, vfsub, vfmul, vwadd, vwadd.w, and vadd gains a leading undef operand, and the declares for vwadd.nxv2i32, vadd.nxv2i32, and vwadd.w.nxv2i32.nxv2i16 gain the matching passthru parameter. The if/else branch structure, phi joins, vsetvli calls, vfmv.v.f and vle/vse calls, and the calls to @foo are unchanged. ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 1cb41692a8cc85f31ea794aede7e80350f5e81fd..489e43c5db6fa3ed0e45b46701ea1874fed4ca1f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -15,11 +15,11 @@
     br i1 %tobool, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
 
   if.else:                                          ; preds = %entry
-    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
[... the other IR functions embedded in this MIR file update their vadd and vsub calls the same way, and the nounwind-readnone declares for vadd.nxv1i64.nxv1i64.i64 and vsub.nxv1i64.nxv1i64.i64 gain the passthru parameter; the vle.nxv1i64.i64 declare already had one and is unchanged. ...]
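The crossbb tests above all share one shape: an intrinsic in each arm of a branch, a phi at the join, and a dependent intrinsic after it, so the vsetvli-insertion pass must prove the VL/VTYPE state is compatible across the join. A minimal sketch of that shape with the new four-operand signatures follows; the function and value names are illustrative, not from the test files:

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64)

define <vscale x 1 x i64> @diamond_sketch(<vscale x 1 x i64> %a,
                                          <vscale x 1 x i64> %b,
                                          i64 %vl, i1 %cond) {
entry:
  br i1 %cond, label %then, label %else
then:
  %t = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(
      <vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i64 %vl)
  br label %join
else:
  %e = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(
      <vscale x 1 x i64> undef, <vscale x 1 x i64> %b, <vscale x 1 x i64> %a, i64 %vl)
  br label %join
join:
  ; Both arms and the merged value below use the same VL/VTYPE, so a
  ; single vsetvli before the branch should be able to cover all three calls.
  %m = phi <vscale x 1 x i64> [ %t, %then ], [ %e, %else ]
  %r = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(
      <vscale x 1 x i64> undef, <vscale x 1 x i64> %m, <vscale x 1 x i64> %a, i64 %vl)
  ret <vscale x 1 x i64> %r
}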
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index e708f5b75639e5d5059c7a61df80bc40869ffef6..f08cf4f84094770a4e42dd300dfcabf11dcd13a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -5,6 +5,7 @@
 declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
 declare i64 @llvm.riscv.vsetvlimax(i64, i64)
 declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i64)
[... the vfadd calls in @test1, @test2, and @test13 each gain a leading "<vscale x 1 x double> undef" argument; their vl operands are i64 %0, i64 %avl, and i64 -1 respectively. ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 4aa61e0fd5fce4ffb2b92c8421e31a6dae443cb5..d7a49aaab4c700db3570c3dedd7992899ec21135 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -10,14 +10,14 @@
   define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
     ret <vscale x 1 x i64> %a
   }
[... the embedded IR for @load_add, @vsetvli_add, and the inline-asm test updates its vadd calls the same way, and the nounwind-readnone vadd declare gains the passthru parameter; the vle declare is unchanged. ...]
%0, i8 %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv64i8.i8( + , , i8, i32); @@ -284,6 +297,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -317,6 +331,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i16.i16( + , , i16, i32); @@ -329,6 +344,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1down.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -362,6 +378,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i16.i16( + , , i16, i32); @@ -374,6 +391,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -407,6 +425,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i16.i16( + , , i16, i32); @@ -419,6 +438,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -452,6 +472,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i16.i16( + , , i16, i32); @@ -464,6 +485,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -497,6 +519,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv16i16.i16( + , , i16, i32); @@ -509,6 +532,7 @@ define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -542,6 +566,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv32i16.i16( + , , i16, i32); @@ -554,6 +579,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -587,6 +613,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i32.i32( + , , i32, i32); @@ -599,6 +626,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -632,6 +660,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i32.i32( + , , i32, i32); @@ -644,6 +673,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -677,6 +707,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i32.i32( + , , i32, i32); @@ -689,6 +720,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -722,6 +754,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i32.i32( + , , i32, i32); @@ -734,6 +767,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -767,6 +801,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv16i32.i32( + , , i32, i32); @@ -779,6 +814,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -812,6 +848,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i64.i64( + , , i64, i32); @@ -826,6 +863,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -863,6 +901,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i64.i64( + , , i64, i32); @@ -877,6 +916,7 @@ define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -914,6 +954,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i64.i64( + , , i64, i32); @@ -928,6 +969,7 @@ define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -965,6 +1007,7 @@ entry: } declare 
@llvm.riscv.vslide1down.nxv8i64.i64( + , , i64, i32); @@ -979,6 +1022,7 @@ define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll index 2110f21bf13d71708d012532864b624813cdf42b..696c12a9fd6de9728d576ad2104ab571e844527c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1down.nxv1i8.i8( + , , i8, i64); @@ -14,6 +15,7 @@ define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1down.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i8.i8( + , , i8, i64); @@ -59,6 +62,7 @@ define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1down.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i8.i8( + , , i8, i64); @@ -104,6 +109,7 @@ define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1down.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i8.i8( + , , i8, i64); @@ -149,6 +156,7 @@ define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1down.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv16i8.i8( + , , i8, i64); @@ -194,6 +203,7 @@ define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1down.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv32i8.i8( + , , i8, i64); @@ -239,6 +250,7 @@ define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1down.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv64i8.i8( + , , i8, i64); @@ -284,6 +297,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -317,6 +331,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i16.i16( + , , i16, i64); @@ -329,6 +344,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1down.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -362,6 +378,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i16.i16( + , , i16, i64); @@ -374,6 +391,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -407,6 +425,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i16.i16( + , , i16, i64); @@ -419,6 +438,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -452,6 +472,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i16.i16( + , , i16, i64); @@ -464,6 +485,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -497,6 +519,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv16i16.i16( + , , i16, i64); @@ -509,6 +532,7 @@ define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -542,6 +566,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv32i16.i16( + , , i16, i64); @@ -554,6 +579,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -587,6 
+613,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i32.i32( + , , i32, i64); @@ -599,6 +626,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -632,6 +660,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i32.i32( + , , i32, i64); @@ -644,6 +673,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -677,6 +707,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i32.i32( + , , i32, i64); @@ -689,6 +720,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -722,6 +754,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i32.i32( + , , i32, i64); @@ -734,6 +767,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -767,6 +801,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv16i32.i32( + , , i32, i64); @@ -779,6 +814,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -812,6 +848,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv1i64.i64( + , , i64, i64); @@ -824,6 +861,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -857,6 +895,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv2i64.i64( + , , i64, i64); @@ -869,6 +908,7 @@ define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -902,6 +942,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv4i64.i64( + , , i64, i64); @@ -914,6 +955,7 @@ define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -947,6 +989,7 @@ entry: } declare @llvm.riscv.vslide1down.nxv8i64.i64( + , , i64, i64); @@ -959,6 +1002,7 @@ define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll index c5b0bb3c07a2947f56924358fab53f744243837c..1209956e7161aa45ad409c31d3dc504b82e24190 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( + , , i8, i32); @@ -15,6 +16,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i8.i8( + , , i8, i32); @@ -61,6 +64,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i8.i8( + , , i8, i32); @@ -107,6 +112,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i8.i8( + , , i8, i32); @@ -153,6 +160,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i8.i8( + , , i8, i32); @@ -199,6 +208,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -232,6 +242,7 
@@ entry: } declare @llvm.riscv.vslide1up.nxv32i8.i8( + , , i8, i32); @@ -245,6 +256,7 @@ define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv64i8.i8( + , , i8, i32); @@ -291,6 +304,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i16.i16( + , , i16, i32); @@ -337,6 +352,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i16.i16( + , , i16, i32); @@ -383,6 +400,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i16.i16( + , , i16, i32); @@ -429,6 +448,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i16.i16( + , , i16, i32); @@ -475,6 +496,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i16.i16( + , , i16, i32); @@ -521,6 +544,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv32i16.i16( + , , i16, i32); @@ -567,6 +592,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i32.i32( + , , i32, i32); @@ -613,6 +640,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i32.i32( + , , i32, i32); @@ -659,6 +688,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i32.i32( + , , i32, i32); @@ -705,6 +736,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i32.i32( + , , i32, i32); @@ -751,6 +784,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i32.i32( + , , i32, i32); @@ -797,6 +832,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i64.i64( + , , i64, i32); @@ -844,6 +881,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -881,6 +919,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i64.i64( + , , i64, i32); @@ -895,6 +934,7 @@ define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -932,6 +972,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i64.i64( + , , i64, i32); @@ -946,6 +987,7 @@ define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( 
@llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -983,6 +1025,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i64.i64( + , , i64, i32); @@ -997,6 +1040,7 @@ define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll index 81c16b9c315cd71089c3e24153a2de8d899ec6e6..a0f9476845ef74a04d92cab222c17958a00d7dab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( + , , i8, i64); @@ -15,6 +16,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i8.i8( + , , i8, i64); @@ -61,6 +64,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i8.i8( + , , i8, i64); @@ -107,6 +112,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i8.i8( + , , i8, i64); @@ -153,6 +160,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i8.i8( + , , i8, i64); @@ -199,6 +208,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv32i8.i8( + , , i8, i64); @@ -245,6 +256,7 @@ define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv64i8.i8( + , , i8, i64); @@ -291,6 +304,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i16.i16( + , , i16, i64); @@ -337,6 +352,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i16.i16( + , , i16, i64); @@ -383,6 +400,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i16.i16( + , , i16, i64); @@ -429,6 +448,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i16.i16( + , , i16, i64); @@ -475,6 +496,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i16.i16( + , , i16, i64); @@ -521,6 +544,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv32i16.i16( + , , i16, i64); @@ -567,6 +592,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ 
-600,6 +626,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i32.i32( + , , i32, i64); @@ -613,6 +640,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i32.i32( + , , i32, i64); @@ -659,6 +688,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i32.i32( + , , i32, i64); @@ -705,6 +736,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i32.i32( + , , i32, i64); @@ -751,6 +784,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv16i32.i32( + , , i32, i64); @@ -797,6 +832,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv1i64.i64( + , , i64, i64); @@ -843,6 +880,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv2i64.i64( + , , i64, i64); @@ -889,6 +928,7 @@ define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv4i64.i64( + , , i64, i64); @@ -935,6 +976,7 @@ define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vslide1up.nxv8i64.i64( + , , i64, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll index cd141520ce540056b2e60a0146e790bad46b9a83..019b40e37ef2717392ad703dcbff28b5832d73e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define 
@intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vsll.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, 
%1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i8( + , , i32, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i8( + , , i32, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i8( + , , i32, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i8( + , , i32, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i8( + , , i32, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i8( + , , i32, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vsll.nxv64i8( + , , i32, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i16( + , , i32, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i16( + , , i32, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i16( + , , i32, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i16( + , , i32, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i16( + , , i32, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i16( + , , i32, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare 
@llvm.riscv.vsll.nxv4i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsll_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i64( + , , i32, i32); @@ -1818,6 +1899,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i64( + , , i32, i32); @@ -1863,6 +1946,7 @@ define @intrinsic_vsll_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i64( + , , i32, i32); @@ -1908,6 +1993,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i64( + , , i32, i32); @@ -1953,6 +2040,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -2008,6 +2097,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 1, i32 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsll.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( 
@llvm.riscv.vsll.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -2697,6 +2807,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll index 305f8712b0f4349b865524651b7f6d96344b596b..da88d4f73cfe9736136908c28a0450e5909ea3af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vsll.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ entry: } declare 
@llvm.riscv.vsll.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i8( + , , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i8( + , , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i8( + , , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i8( + , , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: } declare 
@llvm.riscv.vsll.nxv16i8( + , , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i8( + , , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vsll.nxv64i8( + , , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i16( + , , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i16( + , , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i16( + , , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i16( + , , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i16( + , , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vsll.nxv32i16( + , , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i32( + , , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i32( + , , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i32( + , , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i32( + , , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsll_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vsll.nxv16i32( + , , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vsll.nxv1i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ entry: } declare @llvm.riscv.vsll.nxv2i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ define 
@intrinsic_vsll_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ entry: } declare @llvm.riscv.vsll.nxv4i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ entry: } declare @llvm.riscv.vsll.nxv8i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2008,6 +2097,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 1, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsll.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsll.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsll.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsll.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsll.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2697,6 +2807,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( 
@llvm.riscv.vsll.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll index be3a442cc61b7f5b181fa4b087e11a3a29bc97ab..50a08f0c60a1bf665c12dbc7ab491e3169fb3795 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsmul.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsmul.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsmul.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsmul.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsmul.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vsmul.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsmul.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vsmul.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsmul.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsmul.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsmul.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsmul.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsmul.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsmul.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vsmul.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsmul.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsmul.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 
@@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsmul.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsmul.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsmul.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsmul.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsmul.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsmul.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsmul.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsmul.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsmul.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vsmul.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsmul.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vsmul.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsmul.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsmul.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i16.i16( + , , i16, 
i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsmul.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsmul.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsmul.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsmul.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vsmul.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsmul.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsmul.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsmul.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsmul.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsmul.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vsmul.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsmul.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vsmul.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsmul.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vsmul.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsmul.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vsmul.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsmul.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vsmul.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsmul.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll index 3a5eb3c18f2b6119571fcac13c69c16a5f6983bc..5380ff997c059b87432819d1bd4f286d81a753e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsmul.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 
+49,7 @@ entry:
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2)
[... identical passthru/undef updates for the remaining vsmul vv variants, nxv4i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
[... identical updates for the remaining vsmul vx variants, nxv2i8.i8 through nxv8i64.i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 78eac5fcc9ec78ecb2e7fda335890bbbab52dc0c..24f8d8ae572f8d82a0b88b55058578ff2ccff610 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical updates for the remaining vsra vv variants, nxv2i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32,
   i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i32 %1,
     i32 %2)
[... identical updates for the remaining vsra vx variants, nxv2i8 through nxv8i64 ...]
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i32 9,
     i32 %1)
[... identical updates for the remaining vsra vi variants, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 4841ca02e382b9f7fd12cbcd774a1055baf179cb..435d59d5aa55b50c88091099158628dd630b8e6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical updates for the remaining vv variants, nxv2i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 %1,
     i64 %2)
[... identical updates for the remaining vx variants, nxv2i8 through nxv8i64 ...]
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 9,
     i64 %1)
[... identical updates for the remaining vi variants, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index 8ae5ec19ccda0ee4819d507f5d8afd6c0ef63c43..0d68ab09c4b141d2d5280e265097dae6b2edc363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical updates for the remaining vv variants, nxv2i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32,
   i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i32 %1,
     i32 %2)
[... identical updates for the remaining vx variants, nxv2i8 through nxv8i64 ...]
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i32 9,
     i32 %1)
[... identical updates for the remaining vi variants, nxv2i8 through nxv8i64 ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index 054d81a54db17c5438fc9e54347e40373cc9c134..270f5ca83b62c7db556b84dcfadd32bed6b932c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical updates for the remaining vv variants, nxv2i8 through nxv8i64 ...]
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 %1,
     i64 %2)
[... identical updates for the remaining vx variants, nxv2i8 through nxv8i64 ...]
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i64 9,
     i64 %1)
-2345,6 +2444,7 @@ define @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsrl.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsrl.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsrl.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsrl.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsrl.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsrl.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsrl.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsrl.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsrl.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsrl.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsrl.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll index 9cb0c4803370c628f1af388967b40477aaab75c5..622d1e9e3244c2a6af3a575b5c1cf4757d76bc0d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssra.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssra.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vssra.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssra.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vssra.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssra.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vssra.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssra.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vssra.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssra.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vssra.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssra.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vssra.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssra.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vssra.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssra.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vssra.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ define 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 9cb0c4803370c628f1af388967b40477aaab75c5..622d1e9e3244c2a6af3a575b5c1cf4757d76bc0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[ ... the same update repeats for every remaining hunk in this file: vssra.vv through nxv16i32, vssra.vx (XLEN-wide shift amount, i32) through nxv8i64, and vssra.vi (shift immediate 9) through nxv16i32 ... ]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index b39bdb4bdda64adf673e3bc0664a482684fd6099..621d8a59b401064adbecbafdd23a12be19547e47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[ ... the same update repeats for every remaining hunk in this file: vssra.vv through nxv8i64, vssra.vx (XLEN-wide shift amount, i64) through nxv8i64, and vssra.vi (shift immediate 9) through nxv8i64 ... ]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index 173f2f863d55bf664761124ae5417332029c3681..bac9d515b0adf49055b3c987b4512f043801fc36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[ ... the same update repeats for every remaining hunk in this file: vssrl.vv through nxv16i32, vssrl.vx (XLEN-wide shift amount, i32) through nxv8i64, and vssrl.vi (shift immediate 9) through nxv16i32 ... ]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index 3bc2f68ebb94a007f32d5ce06f9d984da6cf87b9..98da6798eca6a750a8a8b4e1eebcce49d618e53e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[ ... the same update repeats for every remaining hunk in this file: vssrl.vv through nxv8i64, vssrl.vx (XLEN-wide shift amount, i64) through nxv8i64, and vssrl.vi (shift immediate 9) through nxv8i64 ... ]
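Each of these test files has the same skeleton, so a self-contained miniature of one updated case may help when reproducing a single test locally with llc. This is a sketch assembled from the hunks above, not a file from the patch; the CHECK lines are what utils/update_llc_test_checks.py would plausibly regenerate for this input, so treat them as indicative rather than authoritative:

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64)

define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i8> %a
}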
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
index c695923f56a067c9acb5ec14e71daf4a5ce70a24..d8c62715bef94efcb03b9d8c0d35ab655f19fa0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[ ... the same update repeats for every remaining hunk in this file: vssub.vv through nxv8i64 and vssub.vx (element-typed scalar, names of the form @llvm.riscv.vssub.nxv1i8.i8) through nxv8i64.i64; the nxv*i64.i64 hunks sit at shifted line numbers because their CHECK bodies are longer on riscv32 ... ]
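One detail worth noticing before the riscv64 vssub file: the scalar (.vx) intrinsics in this patch come in two shapes. The shift intrinsics take an XLEN-wide shift amount and encode only the vector type in their name, while vssub takes a scalar of the element type and encodes both types. A side-by-side sketch of the riscv32 forms as reconstructed from the declares above:

; Shift form: the scalar shift amount is XLEN wide (i32 on riscv32),
; so the name carries only the vector type.
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32)  ; passthru, vs2, shift, vl

; Arithmetic form: the scalar matches the element type (SEW),
; so the name carries both the vector and the scalar type.
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i8, i32)   ; passthru, vs2, rs1, vl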
+1805,7 @@ define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vssub.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vssub.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vssub.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vssub.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vssub.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vssub.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vssub.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ entry: } declare @llvm.riscv.vssub.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vssub.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ entry: } declare @llvm.riscv.vssub.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vssub.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll index bff302a14b36bcb230c718ee1106d2919d8bf733..5c328c26d614ccaddb10b1dcee9876065844c46e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssub.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vssub.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssub.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vssub.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssub.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vssub.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssub.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vssub.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssub.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vssub.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssub.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ entry: } declare @llvm.riscv.vssub.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssub.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vssub.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssub.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vssub.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssub.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ entry: } declare 
[identical updates for the remaining vv element types nxv2i8 through nxv8i64 (hunks @@ -47,6 +49,7 @@ through @@ -962,6 +1005,7 @@), then for the vx variants, e.g.:]
@@ -996,6 +1040,7 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
[identical vx updates for nxv2i8 through nxv1i64]
[the vx updates finish with nxv2i64 through nxv8i64 (last hunk @@ -1953,6 +2040,7 @@)]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
index 2b0fcf54963c75c5765fcb167dc5c1254209566a..6eab93f84539de74fec41753039a30a5fc9535af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[identical updates for the remaining vv element types nxv2i8 through nxv8i64 and for the vx variants @llvm.riscv.vssubu.nxv1i8.i8 onward, ending with:]
@@ -1977,6 +2063,7 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+  <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 0169d78b4a798e97f389a787cac2ccb4a8e4fec1..2f054417e7a7a820559b67fbf23e5fc4a9c205e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[identical updates for the remaining vv element types nxv2i8 through nxv8i64 and for the vx variant declares up to @llvm.riscv.vssubu.nxv1i16.i16]
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
     i64 %2)
[identical vx updates continue through @llvm.riscv.vssubu.nxv8i64.i64 (last hunk @@ -1953,6 +2040,7 @@)]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index a9896a305516f3c8411f85d7e81796aaddf3b208..4f0bff3562ace21a8572926cd10e8adc92a77320 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[identical updates for the remaining vv element types nxv2i8 through nxv8i64 and for the vx variants @llvm.riscv.vsub.nxv1i8.i8 through @llvm.riscv.vsub.nxv8i64.i64; the vi (vector-immediate) tests reuse the same declares, so only their calls change, e.g.:]
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
[identical vi call updates for nxv2i8 through nxv8i64, with immediate 9 (immediate -9 for nxv64i8); last hunk @@ -2713,6 +2822,7 @@]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index 4840fa6030363946c7253b4c8d8632714e7a7070..2cb7f359dcf0d6c3dc647887507e319d3d786a31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[identical updates for the remaining vv element types nxv2i8 through nxv8i64 and for the vx variants @llvm.riscv.vsub.nxv1i8.i8 through @llvm.riscv.vsub.nxv8i64.i64, then the vi calls:]
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i64 %1)
[identical vi call updates for nxv2i8 through nxv16i32, with immediate 9 throughout]
[the vi call updates finish with nxv1i64 through nxv8i64 (hunks @@ -2569,6 +2675,7 @@ through @@ -2665,6 +2774,7 @@)]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
index 72f559952e294371a5aaea62ef10f2aff07ab3c5..4c2c1a9aa8ec3900dbafbbfc0eda6b9f877598f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[identical updates, with the passthru taking the widened result type, for the remaining vv widenings up to nxv8i64.nxv8i32.nxv8i32 and for the vx variants @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8 through @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32 (last hunk @@ -1349,6 +1408,7 @@)]
@@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll index 348d2b32b65c7eaaaf856d5d0e48983f6cc7f1a4..722349fed6613e0c9c587ac25e565fbc140a6ec4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( 
@llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare 
@llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll index 6fab280f8f9b1d3a1efbaf2187f1c33a1f00a288..b11f800551c30e94b332c8ec5e14ebd621d31fea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -273,6 +285,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + , , , i32); @@ -285,6 +298,7 @@ define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + , , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + , , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ entry: } declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( 
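The hunks above are mechanical: every unmasked vwadd declaration and call gains a leading passthru operand. As a reading aid, here is a minimal self-contained sketch of the updated intrinsic shape with the scalable-vector types written out; it is illustrative only (the nxv1i16 variant and the name @vwadd_sketch are arbitrary, not taken from this patch):

; Unmasked vwadd after this change: operand 0 is the passthru vector.
; Passing undef reproduces the previous behaviour, which is what the
; updated tests do.
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,  ; passthru (new)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  i64)                 ; vl

define <vscale x 1 x i16> @vwadd_sketch(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl) {
  %r = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl)
  ret <vscale x 1 x i16> %r
}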
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index 6fab280f8f9b1d3a1efbaf2187f1c33a1f00a288..b11f800551c30e94b332c8ec5e14ebd621d31fea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
     i32 %2)
[...analogous hunks for the remaining wv/wx and *_untie combinations...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 65481d304ddf44cacbbf06428541a8679cb3c568..6e753d8b139de0146feaf373bbbdf3f7048c4274 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
[...analogous hunks for all wv/wx and *_untie combinations (i64 VL)...]
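The same pattern holds for the .w (wide-operand) files above and their vwaddu.w counterparts below; a brief sketch under the same assumptions (illustrative names only). Note that the *_untie tests pass the wide value as IR argument %1, so their calls read (undef, %1, %0, vl) while the intrinsic's operand roles are unchanged:

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,  ; passthru (new)
  <vscale x 1 x i16>,  ; wide op1
  <vscale x 1 x i8>,   ; narrow op2
  i64)                 ; vl

define <vscale x 1 x i16> @vwadd_w_untie_sketch(<vscale x 1 x i8> %narrow, <vscale x 1 x i16> %wide, i64 %vl) {
  ; the wide operand arrives second (%1) but is still intrinsic operand 1
  %r = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef, <vscale x 1 x i16> %wide, <vscale x 1 x i8> %narrow, i64 %vl)
  ret <vscale x 1 x i16> %r
}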
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
index f37d6ed898679adbcb4ab4b29c377d6d05561221..a8c0d0c1ff389d2f5460025d1f45393b75bf36c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...analogous hunks for the remaining vwaddu vv/vx type combinations...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
index 6c546a504b017ddf3b2f1b7372733e644426d557..d5acb1347fcf41626c4973faa4f6d69ede25d8cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
[...analogous hunks for all vwaddu vv/vx type combinations (i64 VL)...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 1128135e33ab48d2dd807604995e89dfe0de9feb..ff0c070a7b5c0de320befc69bc686712d5eeea52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...analogous hunks for the remaining wv/wx and *_untie combinations...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index e3840e33f81a80f04afd9be26d4beaa18272a131..486acc72cd9ebbc5091e28d56dc467565e0b4e3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
[...analogous hunks for all wv/wx and *_untie combinations (i64 VL)...]
@llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll index 338e4b352ddf361c7fab506f758767726c106053..1484ded0041750a35b89c4413e259c9174923aa4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ 
-613,6 +640,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + 
undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll index da5accc0f1bd450041d4fc0f2e66f9aa18cc963d..8d56ba9e557f180d2b4b51484e868300c37cefb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define 
@intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) 
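; Sketch: the shape of the change in this file, written out with full
; scalable-vector types for the nxv1i16 variant (every other hunk differs
; only in element count/width). The new first operand is a passthru of the
; result type; these unmasked tests pass undef for it, and VL stays last
; (i64 on rv64).
;
; declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
;   <vscale x 1 x i16>,   ; passthru (new)
;   <vscale x 1 x i8>,
;   <vscale x 1 x i8>,
;   i64)                  ; vl
;
; %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
;   <vscale x 1 x i16> undef,
;   <vscale x 1 x i8> %0,
;   <vscale x 1 x i8> %1,
;   i64 %2)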
@@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll index 4f480ebea87261d2c2f9ac963f1d4070e9f72bc9..1edfa38956c19da61faebfc52017699fa40aa003 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , , , 
i32); @@ -521,6 +544,7 @@ define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 
+1264,7 @@ define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll index da7d9b97bc0ace672188bf4ed86b010f1e06d065..f821a23f1f9e1faa29b117ed309d6fec9984054a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( 
@llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( 
@llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll index 11ac17450a4cfb55f15f467e31f6aa4c60ce23ca..ce02e398e7fdc79c0e6b09024db7aac1f3dd1f2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare 
@llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i32); 
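; A minimal sketch of the same rewrite for the vector-scalar (.vx) form on
; riscv32, assuming the nxv4i32/i16 variant declared just above; the passthru
; type always matches the result type, and VL is i32 on rv32. The CHECK lines
; in these hunks are untouched, consistent with an undef passthru selecting
; the same tail-agnostic instruction as before.
;
; %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
;   <vscale x 4 x i32> undef, <vscale x 4 x i16> %0, i16 %1, i32 %2)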
@@ -1073,6 +1120,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll index e9adaec0bdc8c5a529cbbe5fa7056b8f502c5e99..ab57eb0e0bfc41df518543804061cef8facf57df 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, 
%1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare 
@llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll index ef27184103d6093a530ed69b1ca229dd0d8a0891..a37b6c08580f619721beb9b3b6f70b1fcfc31883 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( 
@llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } 
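; The vwsub hunks repeat the same mechanical rewrite; a sketch for the
; widening-subtract .vx case, assuming the nxv1i16 variant from this file:
;
; declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
;   <vscale x 1 x i16>, <vscale x 1 x i8>, i8, i32)
; %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
;   <vscale x 1 x i16> undef, <vscale x 1 x i8> %0, i8 %1, i32 %2)
;
; In the wv "untie" tests later in vwsub.w-rv32.ll only the operand order
; differs (%1 before %0); the undef passthru is inserted the same way.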
declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll index 2de365ecb121811a9c58b7b02293b868a00ad870..bc1e468c93f839c081797fa84642eba0d0238aad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( 
+ undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare 
@llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll index 213cb69a9a6a2637ef2f5462c34171d947c7a4b9..a57271f6b854c80d24da189a13b0a5683ef31a0d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -680,6 +710,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
   i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
     i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 6e8aff5e815930885218f6fee55b551bce05cc88..f4abf11d4ac50b51d7a218acebfb6c993de2ee17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -680,6 +710,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
   i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
     i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
index 603f685d4c7e6b390e720ed692949edab3507d9f..2109237527f4427ec3fca7bf11dc8fc98b68b454 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -692,6 +722,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
index 3c4cc0283cc0d22b885a3e3201df83c200a610b5..dac6937f73750273fd5cbab0bb5715c4ce6a239f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -692,6 +722,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 1406f14de4422b074646d46ed621cf1a5e325c96..3c7f59da442f1db20f37ac5c5b9eb94c58c4de07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -680,6 +710,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
   i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
     i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index d504b680f2a6f836ba8f1d4d53ae77227762e30c..4b47c25384262018da7966e3c872f391e04cb07a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -680,6 +710,7 @@ entry:
 }

 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
   i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
     i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
     i64 %2)
@llvm.riscv.vxor.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vxor.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ entry: } declare @llvm.riscv.vxor.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vxor.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ entry: } declare @llvm.riscv.vxor.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vxor.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ entry: } declare @llvm.riscv.vxor.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vxor.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vxor.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vxor.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ entry: } declare @llvm.riscv.vxor.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vxor.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ entry: } declare @llvm.riscv.vxor.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vxor.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ entry: } declare @llvm.riscv.vxor.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vxor.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vxor.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vxor.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ entry: } declare @llvm.riscv.vxor.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vxor.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ entry: } declare @llvm.riscv.vxor.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vxor.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: } declare @llvm.riscv.vxor.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: } declare 
@llvm.riscv.vxor.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: } declare @llvm.riscv.vxor.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vxor.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: } declare @llvm.riscv.vxor.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vxor.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: } declare @llvm.riscv.vxor.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vxor.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vxor.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vxor.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: } declare @llvm.riscv.vxor.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vxor.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: } declare @llvm.riscv.vxor.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vxor.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: } declare @llvm.riscv.vxor.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vxor.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: } declare @llvm.riscv.vxor.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vxor.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vxor.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vxor.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: } declare @llvm.riscv.vxor.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vxor.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: } declare @llvm.riscv.vxor.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vxor.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: } declare @llvm.riscv.vxor.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vxor.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ entry: } declare @llvm.riscv.vxor.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vxor.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ entry: } declare @llvm.riscv.vxor.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ define 
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
     i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
index 08c47cd6c17fe1d0fa0b442cb653a97edfe738a0..45e21d13ed807fb7f4175a787106e7b089efdd7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -996,6 +1040,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
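
The test churn above is mechanical: every unmasked llvm.riscv.vxor declaration gains a leading
passthru operand, and every existing unmasked call site passes undef for it, matching the
"insert undef passthru" codegen change in riscv_vector.td. A minimal sketch of the resulting
operand layout follows; the function name @vxor_example is hypothetical and the i64 VL type is
taken from the rv64 tests above.

; Minimal sketch (not part of this patch) of the post-patch unmasked form.
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (new leading operand)
  <vscale x 1 x i8>,   ; vs2
  <vscale x 1 x i8>,   ; vs1
  i64);                ; vl

define <vscale x 1 x i8> @vxor_example(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i64 %vl) {
  ; Passing undef as the passthru preserves the old unmasked behavior:
  ; no previous destination value needs to be merged.
  %r = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i64 %vl)
  ret <vscale x 1 x i8> %r
}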