diff --git a/bolt/README.md b/bolt/README.md
index 5580eaac769557915769a3d78482540a46e28342..eed46fff848f120b0a09deef228e195d4dc64d38 100644
--- a/bolt/README.md
+++ b/bolt/README.md
@@ -180,7 +180,7 @@
 Once you have `perf.fdata` ready, you can use it for optimizations with
 BOLT. Assuming your environment is set up to include the right path, execute
 `llvm-bolt`:
 ```
-$ llvm-bolt <executable> -o <executable>.bolt -data=perf.fdata -reorder-blocks=ext-tsp -reorder-functions=hfsort -split-functions=2 -split-all-cold -split-eh -dyno-stats
+$ llvm-bolt <executable> -o <executable>.bolt -data=perf.fdata -reorder-blocks=ext-tsp -reorder-functions=hfsort -split-functions -split-all-cold -split-eh -dyno-stats
 ```
 
 If you need updated debug info, add the `-update-debug-sections` option
diff --git a/bolt/docs/OptimizingClang.md b/bolt/docs/OptimizingClang.md
index 4bbc757d59d57186febcd6de2bdfc5dea70995a2..ff7e71b6a76bc2b6606285f90404c89e3ee79406 100644
--- a/bolt/docs/OptimizingClang.md
+++ b/bolt/docs/OptimizingClang.md
@@ -64,7 +64,7 @@ Notice that we are passing `clang-7` to `perf2bolt` which is the real binary that
 the generated profile:
 ```bash
 $ llvm-bolt $CPATH/clang-7 -o $CPATH/clang-7.bolt -b clang-7.yaml \
-  -reorder-blocks=ext-tsp -reorder-functions=hfsort+ -split-functions=3 \
+  -reorder-blocks=ext-tsp -reorder-functions=hfsort+ -split-functions \
   -split-all-cold -dyno-stats -icf=1 -use-gnu-stack
 ```
 The output will look similar to the one below:
diff --git a/bolt/include/bolt/Core/BinaryBasicBlock.h b/bolt/include/bolt/Core/BinaryBasicBlock.h
index 0048a50b50fc7e290cf0cc87cc8cc6b8965e3820..37b3a9664f9937da754efccc122fded7bf0f67ae 100644
--- a/bolt/include/bolt/Core/BinaryBasicBlock.h
+++ b/bolt/include/bolt/Core/BinaryBasicBlock.h
@@ -634,14 +634,12 @@ public:
 
   /// Test if BB is a predecessor of this block.
   bool isPredecessor(const BinaryBasicBlock *BB) const {
-    auto Itr = std::find(Predecessors.begin(), Predecessors.end(), BB);
-    return Itr != Predecessors.end();
+    return llvm::is_contained(Predecessors, BB);
   }
 
   /// Test if BB is a successor of this block.
   bool isSuccessor(const BinaryBasicBlock *BB) const {
-    auto Itr = std::find(Successors.begin(), Successors.end(), BB);
-    return Itr != Successors.end();
+    return llvm::is_contained(Successors, BB);
   }
 
   /// Test if this BB has a valid execution count.
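The README change above pairs with the boolean `-split-functions` option and the `DeprecatedSplitFunctionOptionParser` added to `bolt/lib/Passes/SplitFunctions.cpp` later in this patch: the old numeric values `2` and `3` still parse, but now emit a deprecation warning. A hypothetical session illustrating the expected behavior (the binary and profile names are placeholders; the warning text is taken verbatim from the `formatv` call in the parser):
```
$ llvm-bolt a.out -o a.out.bolt -data=perf.fdata -split-functions=3
BOLT-WARNING: specifying non-boolean value "3" for option -split-functions is deprecated
```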
diff --git a/bolt/include/bolt/Core/BinaryData.h b/bolt/include/bolt/Core/BinaryData.h index 01e1538f8a95e8c3cb57e8e847fac2a0ec122e67..5f1efda781905db66ec18e16a3263a88c5f0816f 100644 --- a/bolt/include/bolt/Core/BinaryData.h +++ b/bolt/include/bolt/Core/BinaryData.h @@ -112,7 +112,7 @@ public: bool nameStartsWith(StringRef Prefix) const; bool hasSymbol(const MCSymbol *Symbol) const { - return std::find(Symbols.begin(), Symbols.end(), Symbol) != Symbols.end(); + return llvm::is_contained(Symbols, Symbol); } bool isAbsolute() const; diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h index e140fb856cedb1e61bee3bc3d1b989f1c3019eea..3a84b26fbc8e3269efe533ddb866ed6ea6c470e3 100644 --- a/bolt/include/bolt/Core/BinaryFunction.h +++ b/bolt/include/bolt/Core/BinaryFunction.h @@ -716,9 +716,8 @@ private: BB->setOffset(Offset); BasicBlockOffsets.emplace_back(Offset, BB); - assert(std::is_sorted(BasicBlockOffsets.begin(), BasicBlockOffsets.end(), - CompareBasicBlockOffsets()) && - std::is_sorted(begin(), end())); + assert(llvm::is_sorted(BasicBlockOffsets, CompareBasicBlockOffsets()) && + llvm::is_sorted(blocks())); return BB; } diff --git a/bolt/include/bolt/Passes/SplitFunctions.h b/bolt/include/bolt/Passes/SplitFunctions.h index 5fbd67a8061c3ec510c0f2915b38c30eed0ed668..81f751e8a2df2fd9919c2dd56833cc1d39fd3390 100644 --- a/bolt/include/bolt/Passes/SplitFunctions.h +++ b/bolt/include/bolt/Passes/SplitFunctions.h @@ -18,15 +18,6 @@ namespace bolt { /// Split function code in multiple parts. class SplitFunctions : public BinaryFunctionPass { -public: - /// Settings for splitting function bodies into hot/cold partitions. - enum SplittingType : char { - ST_NONE = 0, /// Do not split functions. - ST_LARGE, /// In non-relocation mode, only split functions that - /// are too large to fit into the original space. - ST_ALL, /// Split all functions. - }; - private: /// Split function body into fragments. void splitFunction(BinaryFunction &Function); diff --git a/bolt/lib/Core/BinaryBasicBlock.cpp b/bolt/lib/Core/BinaryBasicBlock.cpp index a3c2b4b61cd462abebda4a667b80be9ec3c0cb46..b5b6acaa26f974a567937582e2f65654465b3850 100644 --- a/bolt/lib/Core/BinaryBasicBlock.cpp +++ b/bolt/lib/Core/BinaryBasicBlock.cpp @@ -544,7 +544,7 @@ BinaryBasicBlock::getBranchStats(const BinaryBasicBlock *Succ) const { } if (TotalCount > 0) { - auto Itr = std::find(Successors.begin(), Successors.end(), Succ); + auto Itr = llvm::find(Successors, Succ); assert(Itr != Successors.end()); const BinaryBranchInfo &BI = BranchInfo[Itr - Successors.begin()]; if (BI.Count && BI.Count != COUNT_NO_PROFILE) { diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp index 6a6f12d48f7208e8ea91a0933ca1adea3fa69246..fa594cf35e9d6c78bb3c2ab2ebf4eaa68985e81c 100644 --- a/bolt/lib/Core/BinaryContext.cpp +++ b/bolt/lib/Core/BinaryContext.cpp @@ -718,9 +718,8 @@ void BinaryContext::skipMarkedFragments() { BF->setSimple(false); BF->setHasSplitJumpTable(true); - std::for_each(BF->Fragments.begin(), BF->Fragments.end(), addToWorklist); - std::for_each(BF->ParentFragments.begin(), BF->ParentFragments.end(), - addToWorklist); + llvm::for_each(BF->Fragments, addToWorklist); + llvm::for_each(BF->ParentFragments, addToWorklist); } if (!FragmentsToSkip.empty()) errs() << "BOLT-WARNING: skipped " << FragmentsToSkip.size() << " function" @@ -1059,10 +1058,9 @@ void BinaryContext::generateSymbolHashes() { // First check if a non-anonymous alias exists and move it to the front. 
if (BD.getSymbols().size() > 1) { - auto Itr = std::find_if(BD.getSymbols().begin(), BD.getSymbols().end(), - [&](const MCSymbol *Symbol) { - return !isInternalSymbolName(Symbol->getName()); - }); + auto Itr = llvm::find_if(BD.getSymbols(), [&](const MCSymbol *Symbol) { + return !isInternalSymbolName(Symbol->getName()); + }); if (Itr != BD.getSymbols().end()) { size_t Idx = std::distance(BD.getSymbols().begin(), Itr); std::swap(BD.getSymbols()[0], BD.getSymbols()[Idx]); @@ -1224,8 +1222,7 @@ void BinaryContext::foldFunction(BinaryFunction &ChildBF, ChildBF.getSymbols().clear(); // Move other names the child function is known under. - std::move(ChildBF.Aliases.begin(), ChildBF.Aliases.end(), - std::back_inserter(ParentBF.Aliases)); + llvm::move(ChildBF.Aliases, std::back_inserter(ParentBF.Aliases)); ChildBF.Aliases.clear(); if (HasRelocations) { @@ -1392,32 +1389,29 @@ unsigned BinaryContext::addDebugFilenameToUnit(const uint32_t DestCUID, std::vector BinaryContext::getSortedFunctions() { std::vector SortedFunctions(BinaryFunctions.size()); - std::transform(BinaryFunctions.begin(), BinaryFunctions.end(), - SortedFunctions.begin(), - [](std::pair &BFI) { - return &BFI.second; - }); - - std::stable_sort(SortedFunctions.begin(), SortedFunctions.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - if (A->hasValidIndex() && B->hasValidIndex()) { - return A->getIndex() < B->getIndex(); - } - return A->hasValidIndex(); - }); + llvm::transform(BinaryFunctions, SortedFunctions.begin(), + [](std::pair &BFI) { + return &BFI.second; + }); + + llvm::stable_sort(SortedFunctions, + [](const BinaryFunction *A, const BinaryFunction *B) { + if (A->hasValidIndex() && B->hasValidIndex()) { + return A->getIndex() < B->getIndex(); + } + return A->hasValidIndex(); + }); return SortedFunctions; } std::vector BinaryContext::getAllBinaryFunctions() { std::vector AllFunctions; AllFunctions.reserve(BinaryFunctions.size() + InjectedBinaryFunctions.size()); - std::transform(BinaryFunctions.begin(), BinaryFunctions.end(), - std::back_inserter(AllFunctions), - [](std::pair &BFI) { - return &BFI.second; - }); - std::copy(InjectedBinaryFunctions.begin(), InjectedBinaryFunctions.end(), - std::back_inserter(AllFunctions)); + llvm::transform(BinaryFunctions, std::back_inserter(AllFunctions), + [](std::pair &BFI) { + return &BFI.second; + }); + llvm::copy(InjectedBinaryFunctions, std::back_inserter(AllFunctions)); return AllFunctions; } @@ -1494,17 +1488,15 @@ void BinaryContext::preprocessDebugInfo() { llvm::errs() << "BOLT-WARNING: BOLT does not support mix mode binary with " "DWARF5 and DWARF{2,3,4}.\n"; - std::sort(AllRanges.begin(), AllRanges.end()); + llvm::sort(AllRanges); for (auto &KV : BinaryFunctions) { const uint64_t FunctionAddress = KV.first; BinaryFunction &Function = KV.second; - auto It = std::partition_point( - AllRanges.begin(), AllRanges.end(), - [=](CURange R) { return R.HighPC <= FunctionAddress; }); - if (It != AllRanges.end() && It->LowPC <= FunctionAddress) { + auto It = llvm::partition_point( + AllRanges, [=](CURange R) { return R.HighPC <= FunctionAddress; }); + if (It != AllRanges.end() && It->LowPC <= FunctionAddress) Function.setDWARFUnit(It->Unit); - } } // Discover units with debug info that needs to be updated. 
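The rewrites in these hunks all lean on the range-based helpers from `llvm/ADT/STLExtras.h`, which simply forward to the corresponding `std::` algorithm over `begin()`/`end()` of the range. Below is a minimal stand-alone sketch of two of them, written against plain standard C++ rather than the actual LLVM implementations, using the `preprocessDebugInfo()` address lookup from the hunk above as the worked example:
```cpp
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

namespace sketch {
// Stand-ins for llvm::is_contained and llvm::partition_point: they forward
// to the std:: algorithm over the whole range.
template <typename R, typename E> bool is_contained(R &&Range, const E &El) {
  return std::find(std::begin(Range), std::end(Range), El) != std::end(Range);
}

template <typename R, typename Pred> auto partition_point(R &&Range, Pred P) {
  return std::partition_point(std::begin(Range), std::end(Range), P);
}
} // namespace sketch

struct CURange {
  uint64_t LowPC, HighPC;
};

int main() {
  // Mirrors the preprocessDebugInfo() lookup above: AllRanges is sorted by
  // address; partition_point finds the first range whose HighPC lies above
  // the queried function address, then a bounds check confirms the hit.
  std::vector<CURange> AllRanges = {{0x10, 0x20}, {0x30, 0x40}};
  const uint64_t FunctionAddress = 0x34;
  auto It = sketch::partition_point(
      AllRanges, [=](CURange R) { return R.HighPC <= FunctionAddress; });
  const bool Hit = It != AllRanges.end() && It->LowPC <= FunctionAddress;

  // Mirrors the isPredecessor()/isSuccessor() rewrite in BinaryBasicBlock.h.
  std::vector<int> Predecessors = {1, 2, 3};
  const bool HasPred = sketch::is_contained(Predecessors, 2);

  return Hit && HasPred ? 0 : 1;
}
```
The benefit of the wrappers is purely ergonomic: the container is named once, so mismatched iterator pairs cannot arise.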
@@ -2218,8 +2210,7 @@ DebugAddressRangesVector BinaryContext::translateModuleAddressRanges( break; const DebugAddressRangesVector FunctionRanges = Function.getOutputAddressRanges(); - std::move(std::begin(FunctionRanges), std::end(FunctionRanges), - std::back_inserter(OutputRanges)); + llvm::move(FunctionRanges, std::back_inserter(OutputRanges)); std::advance(BFI, 1); } } diff --git a/bolt/lib/Core/BinaryEmitter.cpp b/bolt/lib/Core/BinaryEmitter.cpp index d30dbb2a375dfc8613479612ed3ba5891941890f..1001a3a3454ce798e5dc739c36bc53168908eead 100644 --- a/bolt/lib/Core/BinaryEmitter.cpp +++ b/bolt/lib/Core/BinaryEmitter.cpp @@ -333,8 +333,7 @@ bool BinaryEmitter::emitFunction(BinaryFunction &Function, bool EmitColdPart) { // Only write CIE CFI insns that LLVM will not already emit const std::vector &FrameInstrs = MAI->getInitialFrameState(); - if (std::find(FrameInstrs.begin(), FrameInstrs.end(), CFIInstr) == - FrameInstrs.end()) + if (!llvm::is_contained(FrameInstrs, CFIInstr)) emitCFIInstruction(CFIInstr); } } @@ -1087,7 +1086,7 @@ void BinaryEmitter::emitDebugLineInfoForUnprocessedCUs() { StmtListOffsets.push_back(*StmtList); } - std::sort(StmtListOffsets.begin(), StmtListOffsets.end()); + llvm::sort(StmtListOffsets); // For each CU that was not processed, emit its line info as a binary blob. for (const std::unique_ptr &CU : BC.DwCtx->compile_units()) { @@ -1105,8 +1104,7 @@ void BinaryEmitter::emitDebugLineInfoForUnprocessedCUs() { // Statement list ends where the next unit contribution begins, or at the // end of the section. - auto It = - std::upper_bound(StmtListOffsets.begin(), StmtListOffsets.end(), Begin); + auto It = llvm::upper_bound(StmtListOffsets, Begin); const uint64_t End = It == StmtListOffsets.end() ? DebugLineContents.size() : *It; diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp index 8d546b8599ea85e0f88a5d144df42b8e12b9be59..d490c4efc0907627b3a4629d93c7a9bfee40ac7a 100644 --- a/bolt/lib/Core/BinaryFunction.cpp +++ b/bolt/lib/Core/BinaryFunction.cpp @@ -281,9 +281,9 @@ BinaryFunction::getBasicBlockContainingOffset(uint64_t Offset) { * BasicBlockOffsets.end(), * CompareBasicBlockOffsets()))); */ - auto I = std::upper_bound(BasicBlockOffsets.begin(), BasicBlockOffsets.end(), - BasicBlockOffset(Offset, nullptr), - CompareBasicBlockOffsets()); + auto I = + llvm::upper_bound(BasicBlockOffsets, BasicBlockOffset(Offset, nullptr), + CompareBasicBlockOffsets()); assert(I != BasicBlockOffsets.begin() && "first basic block not at offset 0"); --I; BinaryBasicBlock *BB = I->second; @@ -561,10 +561,9 @@ void BinaryFunction::print(raw_ostream &OS, std::string Annotation, std::vector Indices(BB->succ_size()); std::iota(Indices.begin(), Indices.end(), 0); if (BB->succ_size() > 2 && BB->getKnownExecutionCount()) { - std::stable_sort(Indices.begin(), Indices.end(), - [&](const uint64_t A, const uint64_t B) { - return BB->BranchInfo[B] < BB->BranchInfo[A]; - }); + llvm::stable_sort(Indices, [&](const uint64_t A, const uint64_t B) { + return BB->BranchInfo[B] < BB->BranchInfo[A]; + }); } ListSeparator LS; for (unsigned I = 0; I < Indices.size(); ++I) { @@ -1718,7 +1717,7 @@ void BinaryFunction::postProcessJumpTables() { // Remove duplicates branches. We can get a bunch of them from jump tables. // Without doing jump table value profiling we don't have use for extra // (duplicate) branches. 
- std::sort(TakenBranches.begin(), TakenBranches.end()); + llvm::sort(TakenBranches); auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end()); TakenBranches.erase(NewEnd, TakenBranches.end()); } @@ -3003,8 +3002,7 @@ void BinaryFunction::dumpGraph(raw_ostream &OS) const { << "node [fontname=courier, shape=box, style=filled, colorscheme=brbg9]\n"; uint64_t Offset = Address; for (BinaryBasicBlock *BB : BasicBlocks) { - auto LayoutPos = - std::find(BasicBlocksLayout.begin(), BasicBlocksLayout.end(), BB); + auto LayoutPos = llvm::find(BasicBlocksLayout, BB); unsigned Layout = LayoutPos - BasicBlocksLayout.begin(); const char *ColdStr = BB->isCold() ? " (cold)" : ""; std::vector Attrs; @@ -3187,8 +3185,7 @@ bool BinaryFunction::validateCFG() const { } for (const BinaryBasicBlock *LPBlock : BB->landing_pads()) { - if (std::find(LPBlock->throw_begin(), LPBlock->throw_end(), BB) == - LPBlock->throw_end()) { + if (!llvm::is_contained(LPBlock->throwers(), BB)) { errs() << "BOLT-ERROR: inconsistent landing pad detected in " << *this << ": " << BB->getName() << " is in LandingPads but not in " << LPBlock->getName() << " Throwers\n"; @@ -3196,8 +3193,7 @@ bool BinaryFunction::validateCFG() const { } } for (const BinaryBasicBlock *Thrower : BB->throwers()) { - if (std::find(Thrower->lp_begin(), Thrower->lp_end(), BB) == - Thrower->lp_end()) { + if (!llvm::is_contained(Thrower->landing_pads(), BB)) { errs() << "BOLT-ERROR: inconsistent thrower detected in " << *this << ": " << BB->getName() << " is in Throwers list but not in " << Thrower->getName() << " LandingPads\n"; @@ -3670,7 +3666,7 @@ void BinaryFunction::updateLayout(BinaryBasicBlock *Start, } // Insert new blocks in the layout immediately after Start. - auto Pos = std::find(layout_begin(), layout_end(), Start); + auto Pos = llvm::find(layout(), Start); assert(Pos != layout_end()); BasicBlockListType::iterator Begin = std::next(BasicBlocks.begin(), getIndex(Start) + 1); @@ -4184,10 +4180,10 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges( // If the function hasn't changed return the same ranges. if (!isEmitted()) { OutputRanges.resize(InputRanges.size()); - std::transform(InputRanges.begin(), InputRanges.end(), OutputRanges.begin(), - [](const DWARFAddressRange &Range) { - return DebugAddressRange(Range.LowPC, Range.HighPC); - }); + llvm::transform(InputRanges, OutputRanges.begin(), + [](const DWARFAddressRange &Range) { + return DebugAddressRange(Range.LowPC, Range.HighPC); + }); return OutputRanges; } @@ -4207,9 +4203,9 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges( const uint64_t InputEndOffset = std::min(Range.HighPC - getAddress(), getSize()); - auto BBI = std::upper_bound( - BasicBlockOffsets.begin(), BasicBlockOffsets.end(), - BasicBlockOffset(InputOffset, nullptr), CompareBasicBlockOffsets()); + auto BBI = llvm::upper_bound(BasicBlockOffsets, + BasicBlockOffset(InputOffset, nullptr), + CompareBasicBlockOffsets()); --BBI; do { const BinaryBasicBlock *BB = BBI->second; @@ -4246,7 +4242,7 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges( } // Post-processing pass to sort and merge ranges. 
- std::sort(OutputRanges.begin(), OutputRanges.end()); + llvm::sort(OutputRanges); DebugAddressRangesVector MergedRanges; PrevEndAddress = 0; for (const DebugAddressRange &Range : OutputRanges) { @@ -4315,9 +4311,9 @@ DebugLocationsVector BinaryFunction::translateInputToOutputLocationList( } uint64_t InputOffset = Start - getAddress(); const uint64_t InputEndOffset = std::min(End - getAddress(), getSize()); - auto BBI = std::upper_bound( - BasicBlockOffsets.begin(), BasicBlockOffsets.end(), - BasicBlockOffset(InputOffset, nullptr), CompareBasicBlockOffsets()); + auto BBI = llvm::upper_bound(BasicBlockOffsets, + BasicBlockOffset(InputOffset, nullptr), + CompareBasicBlockOffsets()); --BBI; do { const BinaryBasicBlock *BB = BBI->second; @@ -4354,9 +4350,8 @@ DebugLocationsVector BinaryFunction::translateInputToOutputLocationList( } // Sort and merge adjacent entries with identical location. - std::stable_sort( - OutputLL.begin(), OutputLL.end(), - [](const DebugLocationEntry &A, const DebugLocationEntry &B) { + llvm::stable_sort( + OutputLL, [](const DebugLocationEntry &A, const DebugLocationEntry &B) { return A.LowPC < B.LowPC; }); DebugLocationsVector MergedLL; diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp index 767df183b147871e47cb4242cbc1bd0748643f2d..6c32651d4be325c26601c2fbd7906983c1ad0841 100644 --- a/bolt/lib/Core/DebugData.cpp +++ b/bolt/lib/Core/DebugData.cpp @@ -313,10 +313,10 @@ void DebugAddrWriter::AddressForDWOCU::dump() { std::vector SortedMap(indexToAddressBegin(), indexToAdddessEnd()); // Sorting address in increasing order of indices. - std::sort(SortedMap.begin(), SortedMap.end(), - [](const IndexAddressPair &A, const IndexAddressPair &B) { - return A.first < B.first; - }); + llvm::sort(SortedMap, + [](const IndexAddressPair &A, const IndexAddressPair &B) { + return A.first < B.first; + }); for (auto &Pair : SortedMap) dbgs() << Twine::utohexstr(Pair.second) << "\t" << Pair.first << "\n"; } @@ -375,10 +375,10 @@ AddressSectionBuffer DebugAddrWriter::finalize() { std::vector SortedMap(AM->second.indexToAddressBegin(), AM->second.indexToAdddessEnd()); // Sorting address in increasing order of indices. - std::sort(SortedMap.begin(), SortedMap.end(), - [](const IndexAddressPair &A, const IndexAddressPair &B) { - return A.first < B.first; - }); + llvm::sort(SortedMap, + [](const IndexAddressPair &A, const IndexAddressPair &B) { + return A.first < B.first; + }); uint8_t AddrSize = CU->getAddressByteSize(); uint32_t Counter = 0; @@ -449,10 +449,10 @@ AddressSectionBuffer DebugAddrWriterDwarf5::finalize() { AMIter->second.indexToAddressBegin(), AMIter->second.indexToAdddessEnd()); // Sorting address in increasing order of indices. 
- std::sort(SortedMap.begin(), SortedMap.end(), - [](const IndexAddressPair &A, const IndexAddressPair &B) { - return A.first < B.first; - }); + llvm::sort(SortedMap, + [](const IndexAddressPair &A, const IndexAddressPair &B) { + return A.first < B.first; + }); // Writing out Header const uint32_t Length = SortedMap.size() * AddrSize + 4; support::endian::write(AddressStream, Length, Endian); @@ -841,22 +841,20 @@ std::string SimpleBinaryPatcher::patchBinary(StringRef BinaryContents) { CUOffsetMap DebugInfoBinaryPatcher::computeNewOffsets(DWARFContext &DWCtx, bool IsDWOContext) { CUOffsetMap CUMap; - std::sort(DebugPatches.begin(), DebugPatches.end(), - [](const UniquePatchPtrType &V1, const UniquePatchPtrType &V2) { - if (V1.get()->Offset == V2.get()->Offset) { - if (V1->Kind == DebugPatchKind::NewDebugEntry && - V2->Kind == DebugPatchKind::NewDebugEntry) - return reinterpret_cast(V1.get()) - ->CurrentOrder < - reinterpret_cast(V2.get()) - ->CurrentOrder; - - // This is a case where we are modifying first entry of next - // DIE, and adding a new one. - return V1->Kind == DebugPatchKind::NewDebugEntry; - } - return V1.get()->Offset < V2.get()->Offset; - }); + llvm::sort(DebugPatches, [](const UniquePatchPtrType &V1, + const UniquePatchPtrType &V2) { + if (V1.get()->Offset == V2.get()->Offset) { + if (V1->Kind == DebugPatchKind::NewDebugEntry && + V2->Kind == DebugPatchKind::NewDebugEntry) + return reinterpret_cast(V1.get())->CurrentOrder < + reinterpret_cast(V2.get())->CurrentOrder; + + // This is a case where we are modifying first entry of next + // DIE, and adding a new one. + return V1->Kind == DebugPatchKind::NewDebugEntry; + } + return V1.get()->Offset < V2.get()->Offset; + }); DWARFUnitVector::compile_unit_range CompileUnits = IsDWOContext ? DWCtx.dwo_compile_units() : DWCtx.compile_units(); diff --git a/bolt/lib/Core/DynoStats.cpp b/bolt/lib/Core/DynoStats.cpp index 502f67e653f0a523323069c60c03e548facdb061..6849f537b44f98d601fdc64efa9a28c1bf5d8187 100644 --- a/bolt/lib/Core/DynoStats.cpp +++ b/bolt/lib/Core/DynoStats.cpp @@ -107,7 +107,7 @@ void DynoStats::print(raw_ostream &OS, const DynoStats *Other, SortedHistogram.emplace_back(Stat.second.first, Stat.first); // Sort using lexicographic ordering - std::sort(SortedHistogram.begin(), SortedHistogram.end()); + llvm::sort(SortedHistogram); // Dump in ascending order: Start with Opcode with Highest execution // count. diff --git a/bolt/lib/Core/Exceptions.cpp b/bolt/lib/Core/Exceptions.cpp index b3ee89bd9a58e2c28d5df63daef3f71b6f26e65e..b7c0a9f11bba48cf0d44eecc7a9925602074347f 100644 --- a/bolt/lib/Core/Exceptions.cpp +++ b/bolt/lib/Core/Exceptions.cpp @@ -657,7 +657,7 @@ std::vector CFIReaderWriter::generateEHFrameHeader( std::map PCToFDE; // Presort array for binary search. - std::sort(FailedAddresses.begin(), FailedAddresses.end()); + llvm::sort(FailedAddresses); // Initialize PCToFDE using NewEHFrame. for (dwarf::FrameEntry &Entry : NewEHFrame.entries()) { @@ -683,9 +683,7 @@ std::vector CFIReaderWriter::generateEHFrameHeader( }; LLVM_DEBUG(dbgs() << "BOLT-DEBUG: new .eh_frame contains " - << std::distance(NewEHFrame.entries().begin(), - NewEHFrame.entries().end()) - << " entries\n"); + << llvm::size(NewEHFrame.entries()) << " entries\n"); // Add entries from the original .eh_frame corresponding to the functions // that we did not update. 
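The `DebugAddrWriter` hunks above repeatedly sort `(index, address)` pairs so that addresses are written out in increasing index order. A self-contained sketch of that comparator pattern, with a stand-in for BOLT's `IndexAddressPair` type:
```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using IndexAddressPair = std::pair<uint32_t, uint64_t>;

int main() {
  std::vector<IndexAddressPair> SortedMap = {{2, 0x2010}, {0, 0x2000},
                                             {1, 0x2008}};
  // Order by the index (first member); the addresses then come out in
  // index order when the table is emitted.
  std::sort(SortedMap.begin(), SortedMap.end(),
            [](const IndexAddressPair &A, const IndexAddressPair &B) {
              return A.first < B.first;
            });
  // After the sort, indices run 0, 1, 2.
  return SortedMap.front().first == 0 && SortedMap.back().first == 2 ? 0 : 1;
}
```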
@@ -707,9 +705,7 @@ std::vector CFIReaderWriter::generateEHFrameHeader( }; LLVM_DEBUG(dbgs() << "BOLT-DEBUG: old .eh_frame contains " - << std::distance(OldEHFrame.entries().begin(), - OldEHFrame.entries().end()) - << " entries\n"); + << llvm::size(OldEHFrame.entries()) << " entries\n"); // Generate a new .eh_frame_hdr based on the new map. diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp index 649aa71d91a7db6bf1094557bbe41792f3695554..304cf7c2f71eef9a275dde53eec31dea70adb479 100644 --- a/bolt/lib/Passes/BinaryPasses.cpp +++ b/bolt/lib/Passes/BinaryPasses.cpp @@ -1420,10 +1420,10 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) { if (ProfiledFunctions.size() > 10) { if (opts::Verbosity >= 1) { outs() << "BOLT-INFO: top called functions are:\n"; - std::sort(ProfiledFunctions.begin(), ProfiledFunctions.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - return B->getExecutionCount() < A->getExecutionCount(); - }); + llvm::sort(ProfiledFunctions, + [](const BinaryFunction *A, const BinaryFunction *B) { + return B->getExecutionCount() < A->getExecutionCount(); + }); auto SFI = ProfiledFunctions.begin(); auto SFIend = ProfiledFunctions.end(); for (unsigned I = 0u; I < opts::TopCalledLimit && SFI != SFIend; @@ -1433,8 +1433,7 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) { } if (!opts::PrintSortedBy.empty() && - std::find(opts::PrintSortedBy.begin(), opts::PrintSortedBy.end(), - DynoStats::FIRST_DYNO_STAT) == opts::PrintSortedBy.end()) { + !llvm::is_contained(opts::PrintSortedBy, DynoStats::FIRST_DYNO_STAT)) { std::vector Functions; std::map Stats; @@ -1448,24 +1447,22 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) { } const bool SortAll = - std::find(opts::PrintSortedBy.begin(), opts::PrintSortedBy.end(), - DynoStats::LAST_DYNO_STAT) != opts::PrintSortedBy.end(); + llvm::is_contained(opts::PrintSortedBy, DynoStats::LAST_DYNO_STAT); const bool Ascending = opts::DynoStatsSortOrderOpt == opts::DynoStatsSortOrder::Ascending; if (SortAll) { - std::stable_sort(Functions.begin(), Functions.end(), - [Ascending, &Stats](const BinaryFunction *A, - const BinaryFunction *B) { - return Ascending ? Stats.at(A) < Stats.at(B) - : Stats.at(B) < Stats.at(A); - }); + llvm::stable_sort(Functions, + [Ascending, &Stats](const BinaryFunction *A, + const BinaryFunction *B) { + return Ascending ? Stats.at(A) < Stats.at(B) + : Stats.at(B) < Stats.at(A); + }); } else { - std::stable_sort( - Functions.begin(), Functions.end(), - [Ascending, &Stats](const BinaryFunction *A, - const BinaryFunction *B) { + llvm::stable_sort( + Functions, [Ascending, &Stats](const BinaryFunction *A, + const BinaryFunction *B) { const DynoStats &StatsA = Stats.at(A); const DynoStats &StatsB = Stats.at(B); return Ascending ? 
StatsA.lessThan(StatsB, opts::PrintSortedBy) @@ -1564,11 +1561,11 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) { } if (!SuboptimalFuncs.empty()) { - std::sort(SuboptimalFuncs.begin(), SuboptimalFuncs.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - return A->getKnownExecutionCount() / A->getSize() > - B->getKnownExecutionCount() / B->getSize(); - }); + llvm::sort(SuboptimalFuncs, + [](const BinaryFunction *A, const BinaryFunction *B) { + return A->getKnownExecutionCount() / A->getSize() > + B->getKnownExecutionCount() / B->getSize(); + }); outs() << "BOLT-INFO: " << SuboptimalFuncs.size() << " functions have " diff --git a/bolt/lib/Passes/ExtTSPReorderAlgorithm.cpp b/bolt/lib/Passes/ExtTSPReorderAlgorithm.cpp index e20f97935cfae22b41692519e4d284f249ea3312..7281d8290f263820eb8a41825550594cf9a9d438 100644 --- a/bolt/lib/Passes/ExtTSPReorderAlgorithm.cpp +++ b/bolt/lib/Passes/ExtTSPReorderAlgorithm.cpp @@ -801,8 +801,7 @@ private: } // Remove chain From from the list of active chains - auto Iter = std::remove(HotChains.begin(), HotChains.end(), From); - HotChains.erase(Iter, HotChains.end()); + llvm::erase_value(HotChains, From); // Invalidate caches for (std::pair EdgeIter : Into->edges()) @@ -818,26 +817,23 @@ private: SortedChains.push_back(&Chain); // Sorting chains by density in decreasing order - std::stable_sort( - SortedChains.begin(), SortedChains.end(), - [](const Chain *C1, const Chain *C2) { - // Original entry point to the front - if (C1->isEntryPoint() != C2->isEntryPoint()) { - if (C1->isEntryPoint()) - return true; - if (C2->isEntryPoint()) - return false; - } + llvm::stable_sort(SortedChains, [](const Chain *C1, const Chain *C2) { + // Original entry point to the front + if (C1->isEntryPoint() != C2->isEntryPoint()) { + if (C1->isEntryPoint()) + return true; + if (C2->isEntryPoint()) + return false; + } - const double D1 = C1->density(); - const double D2 = C2->density(); - if (D1 != D2) - return D1 > D2; + const double D1 = C1->density(); + const double D2 = C2->density(); + if (D1 != D2) + return D1 > D2; - // Making the order deterministic - return C1->id() < C2->id(); - } - ); + // Making the order deterministic + return C1->id() < C2->id(); + }); // Collect the basic blocks in the order specified by their chains Order.reserve(BF.layout_size()); diff --git a/bolt/lib/Passes/HFSort.cpp b/bolt/lib/Passes/HFSort.cpp index dcdf76bf7c85c83c8fecfa1b1f9d8da457adb346..6569de7d6258f452d78203cf86e5df82b806af39 100644 --- a/bolt/lib/Passes/HFSort.cpp +++ b/bolt/lib/Passes/HFSort.cpp @@ -86,7 +86,7 @@ namespace { void freezeClusters(const CallGraph &Cg, std::vector &Clusters) { uint32_t TotalSize = 0; - std::sort(Clusters.begin(), Clusters.end(), compareClustersDensity); + llvm::sort(Clusters, compareClustersDensity); for (Cluster &C : Clusters) { uint32_t NewSize = TotalSize + C.size(); if (NewSize > FrozenPages * HugePageSize) @@ -150,13 +150,12 @@ std::vector clusterize(const CallGraph &Cg) { for (Cluster &Cluster : Clusters) FuncCluster[Cluster.targets().front()] = &Cluster; - std::sort(SortedFuncs.begin(), SortedFuncs.end(), - [&](const NodeId F1, const NodeId F2) { - const CallGraph::Node &Func1 = Cg.getNode(F1); - const CallGraph::Node &Func2 = Cg.getNode(F2); - return Func1.samples() * Func2.size() > // TODO: is this correct? 
- Func2.samples() * Func1.size(); - }); + llvm::sort(SortedFuncs, [&](const NodeId F1, const NodeId F2) { + const CallGraph::Node &Func1 = Cg.getNode(F1); + const CallGraph::Node &Func2 = Cg.getNode(F2); + return Func1.samples() * Func2.size() > // TODO: is this correct? + Func2.samples() * Func1.size(); + }); // Process each function, and consider merging its cluster with the // one containing its most likely predecessor. @@ -234,8 +233,7 @@ std::vector clusterize(const CallGraph &Cg) { Visited.insert(Cluster); } - std::sort(SortedClusters.begin(), SortedClusters.end(), - compareClustersDensity); + llvm::sort(SortedClusters, compareClustersDensity); return SortedClusters; } @@ -251,9 +249,9 @@ std::vector randomClusters(const CallGraph &Cg) { Clusters.emplace_back(F, Cg.getNode(F)); } - std::sort( - Clusters.begin(), Clusters.end(), - [](const Cluster &A, const Cluster &B) { return A.size() < B.size(); }); + llvm::sort(Clusters, [](const Cluster &A, const Cluster &B) { + return A.size() < B.size(); + }); auto pickMergeCluster = [&Clusters](const size_t Idx) { size_t MaxIdx = Idx + 1; diff --git a/bolt/lib/Passes/HFSortPlus.cpp b/bolt/lib/Passes/HFSortPlus.cpp index fff9165810aa985c6f7ac050b61ffa3f33e08338..70b9a4d51e6ee58a66a92a3358cc9bc7b1946997 100644 --- a/bolt/lib/Passes/HFSortPlus.cpp +++ b/bolt/lib/Passes/HFSortPlus.cpp @@ -245,7 +245,7 @@ public: // Making sure the comparison is deterministic return L->Id < R->Id; }; - std::stable_sort(HotChains.begin(), HotChains.end(), DensityComparator); + llvm::stable_sort(HotChains, DensityComparator); // Return the set of clusters that are left, which are the ones that // didn't get merged (so their first func is its original func) @@ -453,9 +453,9 @@ private: } // Sort the pairs by the weight in reverse order - std::sort( - ArcsToMerge.begin(), ArcsToMerge.end(), - [](const Arc *L, const Arc *R) { return L->weight() > R->weight(); }); + llvm::sort(ArcsToMerge, [](const Arc *L, const Arc *R) { + return L->weight() > R->weight(); + }); // Merge the pairs of chains for (const Arc *Arc : ArcsToMerge) { @@ -567,8 +567,7 @@ private: Into->Score = score(Into); // Remove chain From From the list of active chains - auto it = std::remove(HotChains.begin(), HotChains.end(), From); - HotChains.erase(it, HotChains.end()); + llvm::erase_value(HotChains, From); } private: diff --git a/bolt/lib/Passes/IdenticalCodeFolding.cpp b/bolt/lib/Passes/IdenticalCodeFolding.cpp index e9dc6171cdc1aba2d842f868f6b0305617831d45..70d1532aeaf817e044a7022c896d49ef5e64e9c7 100644 --- a/bolt/lib/Passes/IdenticalCodeFolding.cpp +++ b/bolt/lib/Passes/IdenticalCodeFolding.cpp @@ -479,11 +479,10 @@ void IdenticalCodeFolding::runOnFunctions(BinaryContext &BC) { // Fold functions. Keep the order consistent across invocations with // different options. 
- std::stable_sort(Twins.begin(), Twins.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - return A->getFunctionNumber() < - B->getFunctionNumber(); - }); + llvm::stable_sort( + Twins, [](const BinaryFunction *A, const BinaryFunction *B) { + return A->getFunctionNumber() < B->getFunctionNumber(); + }); BinaryFunction *ParentBF = Twins[0]; for (unsigned I = 1; I < Twins.size(); ++I) { diff --git a/bolt/lib/Passes/IndirectCallPromotion.cpp b/bolt/lib/Passes/IndirectCallPromotion.cpp index 4df905e2c59f3f308e347e501a7da5a2612cebfc..09597c1d11a820b5e8f697286d5b6e2bb43b876d 100644 --- a/bolt/lib/Passes/IndirectCallPromotion.cpp +++ b/bolt/lib/Passes/IndirectCallPromotion.cpp @@ -238,17 +238,16 @@ IndirectCallPromotion::getCallTargets(BinaryBasicBlock &BB, } // Sort by symbol then addr. - std::sort(Targets.begin(), Targets.end(), - [](const Callsite &A, const Callsite &B) { - if (A.To.Sym && B.To.Sym) - return A.To.Sym < B.To.Sym; - else if (A.To.Sym && !B.To.Sym) - return true; - else if (!A.To.Sym && B.To.Sym) - return false; - else - return A.To.Addr < B.To.Addr; - }); + llvm::sort(Targets, [](const Callsite &A, const Callsite &B) { + if (A.To.Sym && B.To.Sym) + return A.To.Sym < B.To.Sym; + else if (A.To.Sym && !B.To.Sym) + return true; + else if (!A.To.Sym && B.To.Sym) + return false; + else + return A.To.Addr < B.To.Addr; + }); // Targets may contain multiple entries to the same target, but using // different indices. Their profile will report the same number of branches @@ -294,21 +293,18 @@ IndirectCallPromotion::getCallTargets(BinaryBasicBlock &BB, // Sort by target count, number of indices in case of jump table, and // mispredicts. We prioritize targets with high count, small number of indices // and high mispredicts. Break ties by selecting targets with lower addresses. - std::stable_sort(Targets.begin(), Targets.end(), - [](const Callsite &A, const Callsite &B) { - if (A.Branches != B.Branches) - return A.Branches > B.Branches; - if (A.JTIndices.size() != B.JTIndices.size()) - return A.JTIndices.size() < B.JTIndices.size(); - if (A.Mispreds != B.Mispreds) - return A.Mispreds > B.Mispreds; - return A.To.Addr < B.To.Addr; - }); + llvm::stable_sort(Targets, [](const Callsite &A, const Callsite &B) { + if (A.Branches != B.Branches) + return A.Branches > B.Branches; + if (A.JTIndices.size() != B.JTIndices.size()) + return A.JTIndices.size() < B.JTIndices.size(); + if (A.Mispreds != B.Mispreds) + return A.Mispreds > B.Mispreds; + return A.To.Addr < B.To.Addr; + }); // Remove non-symbol targets - auto Last = std::remove_if(Targets.begin(), Targets.end(), - [](const Callsite &CS) { return !CS.To.Sym; }); - Targets.erase(Last, Targets.end()); + llvm::erase_if(Targets, [](const Callsite &CS) { return !CS.To.Sym; }); LLVM_DEBUG(if (BF.getJumpTable(Inst)) { uint64_t TotalCount = 0; @@ -471,14 +467,14 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(BinaryBasicBlock &BB, HotTarget.second = Index; } - std::transform( - HotTargetMap.begin(), HotTargetMap.end(), std::back_inserter(HotTargets), + llvm::transform( + HotTargetMap, std::back_inserter(HotTargets), [](const std::pair> &A) { return A.second; }); // Sort with highest counts first. 
- std::sort(HotTargets.rbegin(), HotTargets.rend()); + llvm::sort(reverse(HotTargets)); LLVM_DEBUG({ dbgs() << "BOLT-INFO: ICP jump table hot targets:\n"; @@ -566,9 +562,7 @@ IndirectCallPromotion::findCallTargetSymbols(std::vector &Targets, NewTargets.push_back(Target); std::vector({JTIndex}).swap(NewTargets.back().JTIndices); - Target.JTIndices.erase(std::remove(Target.JTIndices.begin(), - Target.JTIndices.end(), JTIndex), - Target.JTIndices.end()); + llvm::erase_value(Target.JTIndices, JTIndex); // Keep fixCFG counts sane if more indices use this same target later assert(IndicesPerTarget[Target.To.Sym] > 0 && "wrong map"); @@ -581,7 +575,7 @@ IndirectCallPromotion::findCallTargetSymbols(std::vector &Targets, Target.Branches -= NewTargets.back().Branches; Target.Mispreds -= NewTargets.back().Mispreds; } - std::copy(Targets.begin(), Targets.end(), std::back_inserter(NewTargets)); + llvm::copy(Targets, std::back_inserter(NewTargets)); std::swap(NewTargets, Targets); N = I; @@ -1168,7 +1162,7 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) { } // Sort callsites by execution count. - std::sort(IndirectCalls.rbegin(), IndirectCalls.rend()); + llvm::sort(reverse(IndirectCalls)); // Find callsites that contribute to the top "opts::ICPTopCallsites"% // number of calls. diff --git a/bolt/lib/Passes/Inliner.cpp b/bolt/lib/Passes/Inliner.cpp index b7188832a9802ffa6949de1a61efb705eb43d0fe..8052f8f5ce8f71d0a229ab517d738d33555416ab 100644 --- a/bolt/lib/Passes/Inliner.cpp +++ b/bolt/lib/Passes/Inliner.cpp @@ -353,10 +353,10 @@ Inliner::inlineCall(BinaryBasicBlock &CallerBB, // Add CFG edges to the basic blocks of the inlined instance. std::vector Successors(BB.succ_size()); - std::transform(BB.succ_begin(), BB.succ_end(), Successors.begin(), - [&InlinedBBMap](const BinaryBasicBlock *BB) { - return InlinedBBMap.at(BB); - }); + llvm::transform(BB.successors(), Successors.begin(), + [&InlinedBBMap](const BinaryBasicBlock *BB) { + return InlinedBBMap.at(BB); + }); if (CallerFunction.hasValidProfile() && Callee.hasValidProfile()) InlinedBB->addSuccessors(Successors.begin(), Successors.end(), @@ -397,11 +397,10 @@ bool Inliner::inlineCallsInFunction(BinaryFunction &Function) { BinaryContext &BC = Function.getBinaryContext(); std::vector Blocks(Function.layout().begin(), Function.layout().end()); - std::sort(Blocks.begin(), Blocks.end(), - [](const BinaryBasicBlock *BB1, const BinaryBasicBlock *BB2) { - return BB1->getKnownExecutionCount() > - BB2->getKnownExecutionCount(); - }); + llvm::sort( + Blocks, [](const BinaryBasicBlock *BB1, const BinaryBasicBlock *BB2) { + return BB1->getKnownExecutionCount() > BB2->getKnownExecutionCount(); + }); bool DidInlining = false; for (BinaryBasicBlock *BB : Blocks) { @@ -520,11 +519,10 @@ void Inliner::runOnFunctions(BinaryContext &BC) { continue; ConsideredFunctions.push_back(&Function); } - std::sort(ConsideredFunctions.begin(), ConsideredFunctions.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - return B->getKnownExecutionCount() < - A->getKnownExecutionCount(); - }); + llvm::sort(ConsideredFunctions, [](const BinaryFunction *A, + const BinaryFunction *B) { + return B->getKnownExecutionCount() < A->getKnownExecutionCount(); + }); for (BinaryFunction *Function : ConsideredFunctions) { if (opts::InlineLimit && NumInlinedCallSites >= opts::InlineLimit) break; diff --git a/bolt/lib/Passes/Instrumentation.cpp b/bolt/lib/Passes/Instrumentation.cpp index 7da3e362d0d67db4bc06d6141612deac7cb926b9..f236772149c8ebbab221b35e1cf6c685b7cc2f39 
100644 --- a/bolt/lib/Passes/Instrumentation.cpp +++ b/bolt/lib/Passes/Instrumentation.cpp @@ -578,9 +578,8 @@ void Instrumentation::runOnFunctions(BinaryContext &BC) { MCSymbol *Target = BC.registerNameAtAddress( "__bolt_instr_fini", FiniSection->getAddress(), 0, 0); auto IsLEA = [&BC](const MCInst &Inst) { return BC.MIB->isLEA64r(Inst); }; - const auto LEA = - std::find_if(std::next(std::find_if(BB.rbegin(), BB.rend(), IsLEA)), - BB.rend(), IsLEA); + const auto LEA = std::find_if( + std::next(llvm::find_if(reverse(BB), IsLEA)), BB.rend(), IsLEA); LEA->getOperand(4).setExpr( MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *BC.Ctx)); } else { diff --git a/bolt/lib/Passes/LongJmp.cpp b/bolt/lib/Passes/LongJmp.cpp index 90da2c4416cb837e733268ac3b701c6b61cb9c21..429156655b9a59b10542927c1b35404979992b28 100644 --- a/bolt/lib/Passes/LongJmp.cpp +++ b/bolt/lib/Passes/LongJmp.cpp @@ -89,9 +89,8 @@ LongJmpPass::createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym, auto registerInMap = [&](StubGroupsTy &Map) { StubGroupTy &StubGroup = Map[TgtSym]; StubGroup.insert( - std::lower_bound( - StubGroup.begin(), StubGroup.end(), - std::make_pair(AtAddress, nullptr), + llvm::lower_bound( + StubGroup, std::make_pair(AtAddress, nullptr), [&](const std::pair &LHS, const std::pair &RHS) { return LHS.first < RHS.first; @@ -126,8 +125,8 @@ BinaryBasicBlock *LongJmpPass::lookupStubFromGroup( const StubGroupTy &Candidates = CandidatesIter->second; if (Candidates.empty()) return nullptr; - auto Cand = std::lower_bound( - Candidates.begin(), Candidates.end(), std::make_pair(DotAddress, nullptr), + auto Cand = llvm::lower_bound( + Candidates, std::make_pair(DotAddress, nullptr), [&](const std::pair &LHS, const std::pair &RHS) { return LHS.first < RHS.first; @@ -256,11 +255,11 @@ void LongJmpPass::updateStubGroups() { for (auto &KeyVal : StubGroups) { for (StubTy &Elem : KeyVal.second) Elem.first = BBAddresses[Elem.second]; - std::sort(KeyVal.second.begin(), KeyVal.second.end(), - [&](const std::pair &LHS, - const std::pair &RHS) { - return LHS.first < RHS.first; - }); + llvm::sort(KeyVal.second, + [&](const std::pair &LHS, + const std::pair &RHS) { + return LHS.first < RHS.first; + }); } }; diff --git a/bolt/lib/Passes/LoopInversionPass.cpp b/bolt/lib/Passes/LoopInversionPass.cpp index 30eccb83c7e4d5f371ed615adabe12151896dfbf..0da143103dbd589fff113a90e1588a79aa11be0a 100644 --- a/bolt/lib/Passes/LoopInversionPass.cpp +++ b/bolt/lib/Passes/LoopInversionPass.cpp @@ -73,10 +73,9 @@ bool LoopInversionPass::runOnFunction(BinaryFunction &BF) { if (IsChanged) { BinaryFunction::BasicBlockOrderType NewOrder = BF.getLayout(); - std::sort(NewOrder.begin(), NewOrder.end(), - [&](BinaryBasicBlock *BB1, BinaryBasicBlock *BB2) { - return BB1->getLayoutIndex() < BB2->getLayoutIndex(); - }); + llvm::sort(NewOrder, [&](BinaryBasicBlock *BB1, BinaryBasicBlock *BB2) { + return BB1->getLayoutIndex() < BB2->getLayoutIndex(); + }); BF.updateBasicBlockLayout(NewOrder); } diff --git a/bolt/lib/Passes/PettisAndHansen.cpp b/bolt/lib/Passes/PettisAndHansen.cpp index 46de1c7aca6415e62b6b21424571ec3809413725..f138c609b689add10b20e6078c45d5d640a2349e 100644 --- a/bolt/lib/Passes/PettisAndHansen.cpp +++ b/bolt/lib/Passes/PettisAndHansen.cpp @@ -207,7 +207,7 @@ std::vector pettisAndHansen(const CallGraph &Cg) { for (Cluster *C : LiveClusters) OutClusters.push_back(std::move(*C)); - std::sort(OutClusters.begin(), OutClusters.end(), compareClustersDensity); + llvm::sort(OutClusters, compareClustersDensity); return OutClusters; } 
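Earlier in the patch, `IndirectCallPromotion` replaces `std::sort(V.rbegin(), V.rend())` with `llvm::sort(reverse(V))`: both sort the reversed view in ascending order, which leaves the underlying vector sorted in descending order. A standard-library-only demonstration of the equivalence being relied on:
```cpp
#include <algorithm>
#include <vector>

int main() {
  std::vector<int> HotTargets = {3, 1, 2};
  // Sorting the reversed view ascending is the same as sorting the vector
  // descending; llvm::sort(reverse(HotTargets)) is the range-based spelling.
  std::sort(HotTargets.rbegin(), HotTargets.rend());
  // HotTargets is now {3, 2, 1}.
  return HotTargets.front() == 3 && HotTargets.back() == 1 ? 0 : 1;
}
```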
diff --git a/bolt/lib/Passes/RegReAssign.cpp b/bolt/lib/Passes/RegReAssign.cpp index d4f12222ec4d1d9ca317962674a7002577e7377d..a376ef87bfe3b363950c669c96bbb021d1639a04 100644 --- a/bolt/lib/Passes/RegReAssign.cpp +++ b/bolt/lib/Passes/RegReAssign.cpp @@ -197,8 +197,8 @@ void RegReAssign::rankRegisters(BinaryFunction &Function) { } } std::iota(RankedRegs.begin(), RankedRegs.end(), 0); // 0, 1, 2, 3... - std::sort(RankedRegs.begin(), RankedRegs.end(), - [&](size_t A, size_t B) { return RegScore[A] > RegScore[B]; }); + llvm::sort(RankedRegs, + [&](size_t A, size_t B) { return RegScore[A] > RegScore[B]; }); LLVM_DEBUG({ for (size_t Reg : RankedRegs) { diff --git a/bolt/lib/Passes/ReorderAlgorithm.cpp b/bolt/lib/Passes/ReorderAlgorithm.cpp index 62752b077ce5e042d0b0bc5af426174b110cbe32..ba99622f551590d2d955cf72b6bc9bd91fe39b71 100644 --- a/bolt/lib/Passes/ReorderAlgorithm.cpp +++ b/bolt/lib/Passes/ReorderAlgorithm.cpp @@ -244,7 +244,7 @@ void PHGreedyClusterAlgorithm::initQueue(std::vector &Queue, }; // Sort edges in increasing profile count order. - std::sort(Queue.begin(), Queue.end(), Comp); + llvm::sort(Queue, Comp); } void PHGreedyClusterAlgorithm::adjustQueue(std::vector &Queue, @@ -385,7 +385,7 @@ void MinBranchGreedyClusterAlgorithm::adjustQueue(std::vector &Queue, // Sort remaining edges in increasing weight order. Queue.swap(NewQueue); - std::sort(Queue.begin(), Queue.end(), Comp); + llvm::sort(Queue, Comp); } bool MinBranchGreedyClusterAlgorithm::areClustersCompatible( diff --git a/bolt/lib/Passes/ReorderData.cpp b/bolt/lib/Passes/ReorderData.cpp index 3f36caba53ae4b46333bbd848f40c279eb12447f..fb2ead08d090d06ca6c4f657831a6a3ca85c56c5 100644 --- a/bolt/lib/Passes/ReorderData.cpp +++ b/bolt/lib/Passes/ReorderData.cpp @@ -275,8 +275,8 @@ ReorderData::sortedByFunc(BinaryContext &BC, const BinarySection &Section, DataOrder Order = baseOrder(BC, Section); unsigned SplitPoint = Order.size(); - std::sort( - Order.begin(), Order.end(), + llvm::sort( + Order, [&](const DataOrder::value_type &A, const DataOrder::value_type &B) { // Total execution counts of functions referencing BD. const uint64_t ACount = BDtoFuncCount[A.first]; @@ -307,17 +307,17 @@ ReorderData::sortedByCount(BinaryContext &BC, DataOrder Order = baseOrder(BC, Section); unsigned SplitPoint = Order.size(); - std::sort(Order.begin(), Order.end(), - [](const DataOrder::value_type &A, const DataOrder::value_type &B) { - // Weight by number of loads/data size. - const double AWeight = double(A.second) / A.first->getSize(); - const double BWeight = double(B.second) / B.first->getSize(); - return (AWeight > BWeight || - (AWeight == BWeight && - (A.first->getSize() < B.first->getSize() || - (A.first->getSize() == B.first->getSize() && - A.first->getAddress() < B.first->getAddress())))); - }); + llvm::sort(Order, [](const DataOrder::value_type &A, + const DataOrder::value_type &B) { + // Weight by number of loads/data size. 
+ const double AWeight = double(A.second) / A.first->getSize(); + const double BWeight = double(B.second) / B.first->getSize(); + return (AWeight > BWeight || + (AWeight == BWeight && + (A.first->getSize() < B.first->getSize() || + (A.first->getSize() == B.first->getSize() && + A.first->getAddress() < B.first->getAddress())))); + }); for (unsigned Idx = 0; Idx < Order.size(); ++Idx) { if (!Order[Idx].second) { diff --git a/bolt/lib/Passes/ReorderFunctions.cpp b/bolt/lib/Passes/ReorderFunctions.cpp index 6b7f80960faea9078c80d2e7ca08919a14938984..5cda10cb11ca2bb14611d1caf4492e7df79016eb 100644 --- a/bolt/lib/Passes/ReorderFunctions.cpp +++ b/bolt/lib/Passes/ReorderFunctions.cpp @@ -292,28 +292,26 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) { { std::vector SortedFunctions(BFs.size()); uint32_t Index = 0; - std::transform(BFs.begin(), - BFs.end(), - SortedFunctions.begin(), - [](std::pair &BFI) { - return &BFI.second; - }); - std::stable_sort(SortedFunctions.begin(), SortedFunctions.end(), - [&](const BinaryFunction *A, const BinaryFunction *B) { - if (A->isIgnored()) - return false; - const size_t PadA = opts::padFunction(*A); - const size_t PadB = opts::padFunction(*B); - if (!PadA || !PadB) { - if (PadA) - return true; - if (PadB) - return false; - } - return !A->hasProfile() && - (B->hasProfile() || - (A->getExecutionCount() > B->getExecutionCount())); - }); + llvm::transform(BFs, SortedFunctions.begin(), + [](std::pair &BFI) { + return &BFI.second; + }); + llvm::stable_sort(SortedFunctions, [&](const BinaryFunction *A, + const BinaryFunction *B) { + if (A->isIgnored()) + return false; + const size_t PadA = opts::padFunction(*A); + const size_t PadB = opts::padFunction(*B); + if (!PadA || !PadB) { + if (PadA) + return true; + if (PadB) + return false; + } + return !A->hasProfile() && + (B->hasProfile() || + (A->getExecutionCount() > B->getExecutionCount())); + }); for (BinaryFunction *BF : SortedFunctions) if (BF->hasProfile()) BF->setIndex(Index++); @@ -409,24 +407,22 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) { if (FuncsFile || LinkSectionsFile) { std::vector SortedFunctions(BFs.size()); - std::transform(BFs.begin(), BFs.end(), SortedFunctions.begin(), - [](std::pair &BFI) { - return &BFI.second; - }); + llvm::transform(BFs, SortedFunctions.begin(), + [](std::pair &BFI) { + return &BFI.second; + }); // Sort functions by index. 
- std::stable_sort( - SortedFunctions.begin(), - SortedFunctions.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - if (A->hasValidIndex() && B->hasValidIndex()) - return A->getIndex() < B->getIndex(); - if (A->hasValidIndex() && !B->hasValidIndex()) - return true; - if (!A->hasValidIndex() && B->hasValidIndex()) - return false; - return A->getAddress() < B->getAddress(); - }); + llvm::stable_sort(SortedFunctions, + [](const BinaryFunction *A, const BinaryFunction *B) { + if (A->hasValidIndex() && B->hasValidIndex()) + return A->getIndex() < B->getIndex(); + if (A->hasValidIndex() && !B->hasValidIndex()) + return true; + if (!A->hasValidIndex() && B->hasValidIndex()) + return false; + return A->getAddress() < B->getAddress(); + }); for (const BinaryFunction *Func : SortedFunctions) { if (!Func->hasValidIndex()) @@ -440,7 +436,7 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) { if (LinkSectionsFile) { const char *Indent = ""; std::vector AllNames = Func->getNames(); - std::sort(AllNames.begin(), AllNames.end()); + llvm::sort(AllNames); for (StringRef Name : AllNames) { const size_t SlashPos = Name.find('/'); if (SlashPos != std::string::npos) { diff --git a/bolt/lib/Passes/ShrinkWrapping.cpp b/bolt/lib/Passes/ShrinkWrapping.cpp index f032735f8981f92545601f64e8ceec2337f23a19..a0c868c36a38384232854adcac0d595e9d5379fd 100644 --- a/bolt/lib/Passes/ShrinkWrapping.cpp +++ b/bolt/lib/Passes/ShrinkWrapping.cpp @@ -853,24 +853,21 @@ void ShrinkWrapping::computeDomOrder() { DominatorAnalysis &DA = Info.getDominatorAnalysis(); auto &InsnToBB = Info.getInsnToBBMap(); - std::sort(Order.begin(), Order.end(), - [&](const MCPhysReg &A, const MCPhysReg &B) { - BinaryBasicBlock *BBA = - BestSavePos[A] ? InsnToBB[BestSavePos[A]] : nullptr; - BinaryBasicBlock *BBB = - BestSavePos[B] ? InsnToBB[BestSavePos[B]] : nullptr; - if (BBA == BBB) - return A < B; - if (!BBA && BBB) - return false; - if (BBA && !BBB) - return true; - if (DA.doesADominateB(*BestSavePos[A], *BestSavePos[B])) - return true; - if (DA.doesADominateB(*BestSavePos[B], *BestSavePos[A])) - return false; - return A < B; - }); + llvm::sort(Order, [&](const MCPhysReg &A, const MCPhysReg &B) { + BinaryBasicBlock *BBA = BestSavePos[A] ? InsnToBB[BestSavePos[A]] : nullptr; + BinaryBasicBlock *BBB = BestSavePos[B] ? 
InsnToBB[BestSavePos[B]] : nullptr;
+    if (BBA == BBB)
+      return A < B;
+    if (!BBA && BBB)
+      return false;
+    if (BBA && !BBB)
+      return true;
+    if (DA.doesADominateB(*BestSavePos[A], *BestSavePos[B]))
+      return true;
+    if (DA.doesADominateB(*BestSavePos[B], *BestSavePos[A]))
+      return false;
+    return A < B;
+  });
 
   for (MCPhysReg I = 0, E = BC.MRI->getNumRegs(); I != E; ++I)
     DomOrder[Order[I]] = I;
@@ -1821,21 +1818,17 @@ BBIterTy ShrinkWrapping::processInsertionsList(
   }
 
   // Reorder POPs to obey the correct dominance relation between them
-  std::stable_sort(TodoList.begin(), TodoList.end(),
-                   [&](const WorklistItem &A, const WorklistItem &B) {
-                     if ((A.Action != WorklistItem::InsertPushOrPop ||
-                          !A.FIEToInsert.IsLoad) &&
-                         (B.Action != WorklistItem::InsertPushOrPop ||
-                          !B.FIEToInsert.IsLoad))
-                       return false;
-                     if ((A.Action != WorklistItem::InsertPushOrPop ||
-                          !A.FIEToInsert.IsLoad))
-                       return true;
-                     if ((B.Action != WorklistItem::InsertPushOrPop ||
-                          !B.FIEToInsert.IsLoad))
-                       return false;
-                     return DomOrder[B.AffectedReg] < DomOrder[A.AffectedReg];
-                   });
+  llvm::stable_sort(TodoList, [&](const WorklistItem &A,
+                                  const WorklistItem &B) {
+    if ((A.Action != WorklistItem::InsertPushOrPop || !A.FIEToInsert.IsLoad) &&
+        (B.Action != WorklistItem::InsertPushOrPop || !B.FIEToInsert.IsLoad))
+      return false;
+    if ((A.Action != WorklistItem::InsertPushOrPop || !A.FIEToInsert.IsLoad))
+      return true;
+    if ((B.Action != WorklistItem::InsertPushOrPop || !B.FIEToInsert.IsLoad))
+      return false;
+    return DomOrder[B.AffectedReg] < DomOrder[A.AffectedReg];
+  });
 
   // Process insertions
   for (WorklistItem &Item : TodoList) {
diff --git a/bolt/lib/Passes/SplitFunctions.cpp b/bolt/lib/Passes/SplitFunctions.cpp
index b84b2bd55446769ccbb3f93ee426724ca39696f4..60bb41204570ac377ba7e4d75e72310ad77fe9c5 100644
--- a/bolt/lib/Passes/SplitFunctions.cpp
+++ b/bolt/lib/Passes/SplitFunctions.cpp
@@ -14,6 +14,7 @@
 #include "bolt/Core/BinaryFunction.h"
 #include "bolt/Core/ParallelUtilities.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormatVariadic.h"
 
 #include <vector>
 
@@ -22,6 +23,25 @@
 using namespace llvm;
 using namespace bolt;
 
+namespace {
+class DeprecatedSplitFunctionOptionParser : public cl::parser<bool> {
+public:
+  explicit DeprecatedSplitFunctionOptionParser(cl::Option &O)
+      : cl::parser<bool>(O) {}
+
+  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, bool &Value) {
+    if (Arg == "2" || Arg == "3") {
+      Value = true;
+      errs() << formatv("BOLT-WARNING: specifying non-boolean value \"{0}\" "
+                        "for option -{1} is deprecated\n",
+                        Arg, ArgName);
+      return false;
+    }
+    return cl::parser<bool>::parse(O, ArgName, Arg, Value);
+  }
+};
+} // namespace
+
 namespace opts {
 
 extern cl::OptionCategory BoltOptCategory;
@@ -42,21 +62,10 @@ static cl::opt<unsigned> SplitAlignThreshold(
     cl::Hidden, cl::cat(BoltOptCategory));
 
-static cl::opt<bolt::SplitFunctions::SplittingType>
-SplitFunctions("split-functions",
-  cl::desc("split functions into hot and cold regions"),
-  cl::init(SplitFunctions::ST_NONE),
-  cl::values(clEnumValN(SplitFunctions::ST_NONE, "0",
-               "do not split any function"),
-             clEnumValN(SplitFunctions::ST_LARGE, "1",
-               "in non-relocation mode only split functions too large "
-               "to fit into original code space"),
-             clEnumValN(SplitFunctions::ST_LARGE, "2",
-               "same as 1 (backwards compatibility)"),
-             clEnumValN(SplitFunctions::ST_ALL, "3",
-               "split all functions")),
-  cl::ZeroOrMore,
-  cl::cat(BoltOptCategory));
+static cl::opt<bool, false, DeprecatedSplitFunctionOptionParser>
+    SplitFunctions("split-functions",
+                   cl::desc("split functions into hot and cold regions"),
+                   cl::cat(BoltOptCategory));
 
 static cl::opt<unsigned>
SplitThreshold( "split-threshold", @@ -66,11 +75,6 @@ static cl::opt SplitThreshold( "increase after splitting."), cl::init(0), cl::Hidden, cl::cat(BoltOptCategory)); -void syncOptions(BinaryContext &BC) { - if (!BC.HasRelocations && opts::SplitFunctions == SplitFunctions::ST_LARGE) - opts::SplitFunctions = SplitFunctions::ST_ALL; -} - } // namespace opts namespace llvm { @@ -85,9 +89,7 @@ bool SplitFunctions::shouldOptimize(const BinaryFunction &BF) const { } void SplitFunctions::runOnFunctions(BinaryContext &BC) { - opts::syncOptions(BC); - - if (opts::SplitFunctions == SplitFunctions::ST_NONE) + if (!opts::SplitFunctions) return; ParallelUtilities::WorkFuncTy WorkFun = [&](BinaryFunction &BF) { @@ -140,12 +142,6 @@ void SplitFunctions::splitFunction(BinaryFunction &BF) { << " pre-split is <0x" << Twine::utohexstr(OriginalHotSize) << ", 0x" << Twine::utohexstr(ColdSize) << ">\n"); - if (opts::SplitFunctions == SplitFunctions::ST_LARGE && - !BC.HasRelocations) { - // Split only if the function wouldn't fit. - if (OriginalHotSize <= BF.getMaxSize()) - return; - } } // Never outline the first basic block. @@ -188,10 +184,10 @@ void SplitFunctions::splitFunction(BinaryFunction &BF) { // All blocks with 0 count that we can move go to the end of the function. // Even if they were natural to cluster formation and were seen in-between // hot basic blocks. - std::stable_sort(BF.layout_begin(), BF.layout_end(), - [&](BinaryBasicBlock *A, BinaryBasicBlock *B) { - return A->canOutline() < B->canOutline(); - }); + llvm::stable_sort(BF.layout(), + [&](BinaryBasicBlock *A, BinaryBasicBlock *B) { + return A->canOutline() < B->canOutline(); + }); } else if (BF.hasEHRanges() && !opts::SplitEH) { // Typically functions with exception handling have landing pads at the end. 
// We cannot move beginning of landing pads, but we can move 0-count blocks diff --git a/bolt/lib/Passes/ThreeWayBranch.cpp b/bolt/lib/Passes/ThreeWayBranch.cpp index 445faba888ae55b796ab52b2f19854a404cb60f7..4469bb49b5fb903b2d69641779ccd48fc2b87315 100644 --- a/bolt/lib/Passes/ThreeWayBranch.cpp +++ b/bolt/lib/Passes/ThreeWayBranch.cpp @@ -101,12 +101,10 @@ void ThreeWayBranch::runOnFunction(BinaryFunction &Function) { Blocks.push_back(std::make_pair(SecondEndpoint, SecondCC)); Blocks.push_back(std::make_pair(ThirdEndpoint, ThirdCC)); - std::sort(Blocks.begin(), Blocks.end(), - [&](const std::pair A, - const std::pair B) { - return A.first->getExecutionCount() < - B.first->getExecutionCount(); - }); + llvm::sort(Blocks, [&](const std::pair A, + const std::pair B) { + return A.first->getExecutionCount() < B.first->getExecutionCount(); + }); uint64_t NewSecondBranchCount = Blocks[1].first->getExecutionCount() + Blocks[0].first->getExecutionCount(); diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp index 9942a4033807f84d232ae766af18c624752bca13..6485e95b4952a7ea07f1b1b723178fbc5deafa1a 100644 --- a/bolt/lib/Profile/DataAggregator.cpp +++ b/bolt/lib/Profile/DataAggregator.cpp @@ -114,10 +114,10 @@ std::vector getTextSections(const BinaryContext *BC) { sections.push_back( {Section.getName(), Section.getAddress(), Section.getEndAddress()}); } - std::sort(sections.begin(), sections.end(), - [](const SectionNameAndRange &A, const SectionNameAndRange &B) { - return A.BeginAddress < B.BeginAddress; - }); + llvm::sort(sections, + [](const SectionNameAndRange &A, const SectionNameAndRange &B) { + return A.BeginAddress < B.BeginAddress; + }); return sections; } } diff --git a/bolt/lib/Profile/DataReader.cpp b/bolt/lib/Profile/DataReader.cpp index be1dc9f059d2c7b95338d49ec1a72507fe24cbea..a66f15a72f38575f0c3ea937ae645d014c9aa53d 100644 --- a/bolt/lib/Profile/DataReader.cpp +++ b/bolt/lib/Profile/DataReader.cpp @@ -95,7 +95,7 @@ void FuncBranchData::appendFrom(const FuncBranchData &FBD, uint64_t Offset) { I->To.Offset += Offset; } } - std::stable_sort(Data.begin(), Data.end()); + llvm::stable_sort(Data); ExecutionCount += FBD.ExecutionCount; for (auto I = FBD.EntryData.begin(), E = FBD.EntryData.end(); I != E; ++I) { assert(I->To.Name == FBD.Name); @@ -123,7 +123,7 @@ void SampleInfo::print(raw_ostream &OS) const { } uint64_t FuncSampleData::getSamples(uint64_t Start, uint64_t End) const { - assert(std::is_sorted(Data.begin(), Data.end())); + assert(llvm::is_sorted(Data)); struct Compare { bool operator()(const SampleInfo &SI, const uint64_t Val) const { return SI.Loc.Offset < Val; @@ -133,8 +133,8 @@ uint64_t FuncSampleData::getSamples(uint64_t Start, uint64_t End) const { } }; uint64_t Result = 0; - for (auto I = std::lower_bound(Data.begin(), Data.end(), Start, Compare()), - E = std::lower_bound(Data.begin(), Data.end(), End, Compare()); + for (auto I = llvm::lower_bound(Data, Start, Compare()), + E = llvm::lower_bound(Data, End, Compare()); I != E; ++I) Result += I->Hits; return Result; @@ -1146,12 +1146,10 @@ std::error_code DataReader::parseInNoLBRMode() { } for (StringMapEntry &FuncSamples : NamesToSamples) - std::stable_sort(FuncSamples.second.Data.begin(), - FuncSamples.second.Data.end()); + llvm::stable_sort(FuncSamples.second.Data); for (StringMapEntry &MemEvents : NamesToMemEvents) - std::stable_sort(MemEvents.second.Data.begin(), - MemEvents.second.Data.end()); + llvm::stable_sort(MemEvents.second.Data); return std::error_code(); } @@ -1247,12 +1245,10 
@@ std::error_code DataReader::parse() { } for (StringMapEntry &FuncBranches : NamesToBranches) - std::stable_sort(FuncBranches.second.Data.begin(), - FuncBranches.second.Data.end()); + llvm::stable_sort(FuncBranches.second.Data); for (StringMapEntry &MemEvents : NamesToMemEvents) - std::stable_sort(MemEvents.second.Data.begin(), - MemEvents.second.Data.end()); + llvm::stable_sort(MemEvents.second.Data); return std::error_code(); } diff --git a/bolt/lib/Profile/Heatmap.cpp b/bolt/lib/Profile/Heatmap.cpp index 9761829683cddc749123bab67e28a882ea0b0201..af9cbc798898d382d5857893c07a23238ca8f9b0 100644 --- a/bolt/lib/Profile/Heatmap.cpp +++ b/bolt/lib/Profile/Heatmap.cpp @@ -233,7 +233,7 @@ void Heatmap::printCDF(raw_ostream &OS) const { NumTotalCounts += KV.second; } - std::sort(Counts.begin(), Counts.end(), std::greater()); + llvm::sort(Counts, std::greater()); double RatioLeftInKB = (1.0 * BucketSize) / 1024; assert(NumTotalCounts > 0 && diff --git a/bolt/lib/Profile/YAMLProfileWriter.cpp b/bolt/lib/Profile/YAMLProfileWriter.cpp index ddbcd5939adaf99aded7915dfb65313d0bbc8f68..bf00a087b2131c4b6fbd0fd78360cec7426a0182 100644 --- a/bolt/lib/Profile/YAMLProfileWriter.cpp +++ b/bolt/lib/Profile/YAMLProfileWriter.cpp @@ -106,7 +106,7 @@ void convert(const BinaryFunction &BF, } } - std::sort(YamlBB.CallSites.begin(), YamlBB.CallSites.end()); + llvm::sort(YamlBB.CallSites); // Skip printing if there's no profile data for non-entry basic block. // Include landing pads with non-zero execution count. diff --git a/bolt/lib/Rewrite/BoltDiff.cpp b/bolt/lib/Rewrite/BoltDiff.cpp index b1ea6d50692a8aeddbded94084ae9d9caa69640a..0b6e59e5369362331116dcd84642f04564e83448 100644 --- a/bolt/lib/Rewrite/BoltDiff.cpp +++ b/bolt/lib/Rewrite/BoltDiff.cpp @@ -302,10 +302,10 @@ class RewriteInstanceDiff { continue; Unmapped.emplace_back(&Function); } - std::sort(Unmapped.begin(), Unmapped.end(), - [&](const BinaryFunction *A, const BinaryFunction *B) { - return A->getFunctionScore() > B->getFunctionScore(); - }); + llvm::sort(Unmapped, + [&](const BinaryFunction *A, const BinaryFunction *B) { + return A->getFunctionScore() > B->getFunctionScore(); + }); for (const BinaryFunction *Function : Unmapped) { outs() << Function->getPrintName() << " : "; outs() << Function->getFunctionScore() << "\n"; diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp index f0d6a4db0244b979232a787d7322320352bcbf88..36a80a11d2753bd3f7a6d160d5054ef91dfbfe87 100644 --- a/bolt/lib/Rewrite/DWARFRewriter.cpp +++ b/bolt/lib/Rewrite/DWARFRewriter.cpp @@ -479,6 +479,31 @@ void DWARFRewriter::updateUnitDebugInfo( AbbrevWriter); break; } + case dwarf::DW_TAG_call_site: { + auto patchPC = [&](AttrInfo &AttrVal, StringRef Entry) -> void { + Optional Address = AttrVal.V.getAsAddress(); + const BinaryFunction *Function = + BC.getBinaryFunctionContainingAddress(*Address); + const uint64_t UpdatedAddress = + Function->translateInputToOutputAddress(*Address); + const uint32_t Index = + AddrWriter->getIndexFromAddress(UpdatedAddress, Unit); + if (AttrVal.V.getForm() == dwarf::DW_FORM_addrx) + DebugInfoPatcher.addUDataPatch(AttrVal.Offset, Index, AttrVal.Size); + else + errs() << "BOLT-ERROR: unsupported form for " << Entry << "\n"; + }; + + if (Optional AttrVal = + findAttributeInfo(DIE, dwarf::DW_AT_call_pc)) + patchPC(*AttrVal, "DW_AT_call_pc"); + + if (Optional AttrVal = + findAttributeInfo(DIE, dwarf::DW_AT_call_return_pc)) + patchPC(*AttrVal, "DW_AT_call_return_pc"); + + break; + } default: { // Handle any tag that can have 
DW_AT_location attribute. DWARFFormValue Value; @@ -1243,10 +1268,9 @@ static std::string extractDWOTUFromDWP( // Sorting so it's easy to compare output. // They should be sharing the same Abbrev. - std::sort(TUContributions.begin(), TUContributions.end(), - [](const TUEntry &V1, const TUEntry &V2) -> bool { - return V1.second->Offset < V2.second->Offset; - }); + llvm::sort(TUContributions, [](const TUEntry &V1, const TUEntry &V2) -> bool { + return V1.second->Offset < V2.second->Offset; + }); for (auto &PairEntry : TUContributions) { const DWARFUnitIndex::Entry::SectionContribution *C = PairEntry.second; @@ -1289,11 +1313,11 @@ static void extractTypesFromDWPDWARF5( } // Sorting so it's easy to compare output. // They should be sharing the same Abbrev. - std::sort(TUContributions.begin(), TUContributions.end(), - [](const DWARFUnitIndex::Entry::SectionContribution *V1, - const DWARFUnitIndex::Entry::SectionContribution *V2) -> bool { - return V1->Offset < V2->Offset; - }); + llvm::sort(TUContributions, + [](const DWARFUnitIndex::Entry::SectionContribution *V1, + const DWARFUnitIndex::Entry::SectionContribution *V2) -> bool { + return V1->Offset < V2->Offset; + }); Streamer.switchSection(MCOFI.getDwarfInfoDWOSection()); for (const auto *C : TUContributions) Streamer.emitBytes(Contents.slice(C->Offset, C->Offset + C->Length)); diff --git a/bolt/lib/Rewrite/MachORewriteInstance.cpp b/bolt/lib/Rewrite/MachORewriteInstance.cpp index 0958e6eb73142e188ee9e9d2c4f1b297b3b0067d..de890eb17d0cacb90413ddd7bd408d57f0b4da0a 100644 --- a/bolt/lib/Rewrite/MachORewriteInstance.cpp +++ b/bolt/lib/Rewrite/MachORewriteInstance.cpp @@ -191,10 +191,9 @@ std::vector readDataInCode(const MachOObjectFile &O) { DataInCode.reserve(NumberOfEntries); for (auto I = O.begin_dices(), E = O.end_dices(); I != E; ++I) DataInCode.emplace_back(*I); - std::stable_sort(DataInCode.begin(), DataInCode.end(), - [](DataInCodeRegion LHS, DataInCodeRegion RHS) { - return LHS.Offset < RHS.Offset; - }); + llvm::stable_sort(DataInCode, [](DataInCodeRegion LHS, DataInCodeRegion RHS) { + return LHS.Offset < RHS.Offset; + }); return DataInCode; } @@ -244,10 +243,10 @@ void MachORewriteInstance::discoverFileObjects() { } if (FunctionSymbols.empty()) return; - std::stable_sort(FunctionSymbols.begin(), FunctionSymbols.end(), - [](const SymbolRef &LHS, const SymbolRef &RHS) { - return cantFail(LHS.getValue()) < cantFail(RHS.getValue()); - }); + llvm::stable_sort( + FunctionSymbols, [](const SymbolRef &LHS, const SymbolRef &RHS) { + return cantFail(LHS.getValue()) < cantFail(RHS.getValue()); + }); for (size_t Index = 0; Index < FunctionSymbols.size(); ++Index) { const uint64_t Address = cantFail(FunctionSymbols[Index].getValue()); ErrorOr Section = BC->getSectionForAddress(Address); diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp index 37f1bcf7b7b598429ea41e1b02ace4cb7e8c2601..1047c2cbf864e10e5d042493b20813bc71ac617b 100644 --- a/bolt/lib/Rewrite/RewriteInstance.cpp +++ b/bolt/lib/Rewrite/RewriteInstance.cpp @@ -308,10 +308,9 @@ namespace { bool refersToReorderedSection(ErrorOr Section) { auto Itr = - std::find_if(opts::ReorderData.begin(), opts::ReorderData.end(), - [&](const std::string &SectionName) { - return (Section && Section->getName() == SectionName); - }); + llvm::find_if(opts::ReorderData, [&](const std::string &SectionName) { + return (Section && Section->getName() == SectionName); + }); return Itr != opts::ReorderData.end(); } @@ -839,8 +838,8 @@ void RewriteInstance::discoverFileObjects() { 
return Section.isAllocatable(); }; std::vector SortedFileSymbols; - std::copy_if(InputFile->symbol_begin(), InputFile->symbol_end(), - std::back_inserter(SortedFileSymbols), isSymbolInMemory); + llvm::copy_if(InputFile->symbols(), std::back_inserter(SortedFileSymbols), + isSymbolInMemory); auto CompareSymbols = [this](const SymbolRef &A, const SymbolRef &B) { // Marker symbols have the highest precedence, while // SECTIONs have the lowest. @@ -865,8 +864,7 @@ void RewriteInstance::discoverFileObjects() { return false; }; - std::stable_sort(SortedFileSymbols.begin(), SortedFileSymbols.end(), - CompareSymbols); + llvm::stable_sort(SortedFileSymbols, CompareSymbols); auto LastSymbol = SortedFileSymbols.end() - 1; @@ -2702,11 +2700,10 @@ void RewriteInstance::selectFunctionsToProcess() { if (ProfileReader->mayHaveProfileData(Function)) TopFunctions.push_back(&Function); } - std::sort(TopFunctions.begin(), TopFunctions.end(), - [](const BinaryFunction *A, const BinaryFunction *B) { - return - A->getKnownExecutionCount() < B->getKnownExecutionCount(); - }); + llvm::sort( + TopFunctions, [](const BinaryFunction *A, const BinaryFunction *B) { + return A->getKnownExecutionCount() < B->getKnownExecutionCount(); + }); size_t Index = TopFunctions.size() * opts::LiteThresholdPct / 100; if (Index) @@ -3295,7 +3292,7 @@ void RewriteInstance::updatePseudoProbes() { std::vector Addresses; for (auto &Entry : Address2ProbesMap) Addresses.push_back(Entry.first); - std::sort(Addresses.begin(), Addresses.end()); + llvm::sort(Addresses); for (uint64_t Key : Addresses) { for (MCDecodedPseudoProbe &Probe : Address2ProbesMap[Key]) { if (Probe.getAddress() == INT64_MAX) @@ -3569,7 +3566,7 @@ std::vector RewriteInstance::getCodeSections() { }; // Determine the order of sections. - std::stable_sort(CodeSections.begin(), CodeSections.end(), compareSections); + llvm::stable_sort(CodeSections, compareSections); return CodeSections; } @@ -3601,12 +3598,9 @@ void RewriteInstance::mapCodeSections(RuntimeDyld &RTDyld) { std::vector CodeSections = getCodeSections(); // Remove sections that were pre-allocated (patch sections). - CodeSections.erase( - std::remove_if(CodeSections.begin(), CodeSections.end(), - [](BinarySection *Section) { - return Section->getOutputAddress(); - }), - CodeSections.end()); + llvm::erase_if(CodeSections, [](BinarySection *Section) { + return Section->getOutputAddress(); + }); LLVM_DEBUG(dbgs() << "Code sections in the order of output:\n"; for (const BinarySection *Section : CodeSections) dbgs() << Section->getName() << '\n'; @@ -4263,11 +4257,11 @@ RewriteInstance::getOutputSections(ELFObjectFile *File, } // Sort all allocatable sections by their offset. - std::stable_sort(OutputSections.begin(), OutputSections.end(), - [] (const std::pair &A, - const std::pair &B) { - return A.second.sh_offset < B.second.sh_offset; - }); + llvm::stable_sort(OutputSections, + [](const std::pair &A, + const std::pair &B) { + return A.second.sh_offset < B.second.sh_offset; + }); // Fix section sizes to prevent overlapping. 
ELFShdrTy *PrevSection = nullptr; @@ -4376,11 +4370,10 @@ RewriteInstance::getOutputSections(ELFObjectFile *File, } std::vector SectionsOnly(OutputSections.size()); - std::transform(OutputSections.begin(), OutputSections.end(), - SectionsOnly.begin(), - [](std::pair &SectionInfo) { - return SectionInfo.second; - }); + llvm::transform(OutputSections, SectionsOnly.begin(), + [](std::pair &SectionInfo) { + return SectionInfo.second; + }); return SectionsOnly; } @@ -4777,13 +4770,11 @@ void RewriteInstance::updateELFSymbolTable( } // Put local symbols at the beginning. - std::stable_sort(Symbols.begin(), Symbols.end(), - [](const ELFSymTy &A, const ELFSymTy &B) { - if (A.getBinding() == ELF::STB_LOCAL && - B.getBinding() != ELF::STB_LOCAL) - return true; - return false; - }); + llvm::stable_sort(Symbols, [](const ELFSymTy &A, const ELFSymTy &B) { + if (A.getBinding() == ELF::STB_LOCAL && B.getBinding() != ELF::STB_LOCAL) + return true; + return false; + }); for (const ELFSymTy &Symbol : Symbols) Write(0, Symbol); diff --git a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp index 091211662a039be52a65a59bff5b72274d498d5e..b9c6e0700bac7968e9c34186c60593e55f6a11d7 100644 --- a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp +++ b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp @@ -243,13 +243,12 @@ std::string InstrumentationRuntimeLibrary::buildTables(BinaryContext &BC) { }; // Indirect targets need to be sorted for fast lookup during runtime - std::sort(Summary->IndCallTargetDescriptions.begin(), - Summary->IndCallTargetDescriptions.end(), - [&](const IndCallTargetDescription &A, - const IndCallTargetDescription &B) { - return getOutputAddress(*A.Target, A.ToLoc.Offset) < - getOutputAddress(*B.Target, B.ToLoc.Offset); - }); + llvm::sort(Summary->IndCallTargetDescriptions, + [&](const IndCallTargetDescription &A, + const IndCallTargetDescription &B) { + return getOutputAddress(*A.Target, A.ToLoc.Offset) < + getOutputAddress(*B.Target, B.ToLoc.Offset); + }); // Start of the vector with descriptions (one CounterDescription for each // counter), vector size is Counters.size() CounterDescription-sized elmts diff --git a/bolt/test/X86/Inputs/dwarf5-call-pc-helper.s b/bolt/test/X86/Inputs/dwarf5-call-pc-helper.s new file mode 100644 index 0000000000000000000000000000000000000000..941601fffba29357eb5916f146307f037d9f6664 --- /dev/null +++ b/bolt/test/X86/Inputs/dwarf5-call-pc-helper.s @@ -0,0 +1,201 @@ +# -gdwarf-5 -g2 -O2 -S +# int helper(int z_, int d_) { +# return z_ + d_; +# } + + .text + .file "helper.cpp" + .globl _Z6helperii # -- Begin function _Z6helperii + .p2align 4, 0x90 + .type _Z6helperii,@function +_Z6helperii: # @_Z6helperii +.Lfunc_begin0: + .file 0 "." 
"helper.cpp" md5 0x8020e02b87876b529416442978378ed0 + .loc 0 1 0 # helper.cpp:1:0 + .cfi_startproc +# %bb.0: # %entry + #DEBUG_VALUE: helper:z_ <- $edi + #DEBUG_VALUE: helper:d_ <- $esi + # kill: def $esi killed $esi def $rsi + # kill: def $edi killed $edi def $rdi + .loc 0 2 13 prologue_end # helper.cpp:2:13 + leal (%rdi,%rsi), %eax + .loc 0 2 3 is_stmt 0 # helper.cpp:2:3 + retq +.Ltmp0: +.Lfunc_end0: + .size _Z6helperii, .Lfunc_end0-_Z6helperii + .cfi_endproc + # -- End function + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .byte 37 # DW_FORM_strx1 + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 114 # DW_AT_str_offsets_base + .byte 23 # DW_FORM_sec_offset + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 37 # DW_FORM_strx1 + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 115 # DW_AT_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 122 # DW_AT_call_all_calls + .byte 25 # DW_FORM_flag_present + .byte 110 # DW_AT_linkage_name + .byte 37 # DW_FORM_strx1 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 4 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 5 # DWARF version number + .byte 1 # DWARF Unit Type + .byte 8 # Address Size (in bytes) + .long .debug_abbrev # Offset Into Abbrev. 
Section + .byte 1 # Abbrev [1] 0xc:0x41 DW_TAG_compile_unit + .byte 0 # DW_AT_producer + .short 33 # DW_AT_language + .byte 1 # DW_AT_name + .long .Lstr_offsets_base0 # DW_AT_str_offsets_base + .long .Lline_table_start0 # DW_AT_stmt_list + .byte 2 # DW_AT_comp_dir + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_addr_base + .byte 2 # Abbrev [2] 0x23:0x25 DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 87 + # DW_AT_call_all_calls + .byte 3 # DW_AT_linkage_name + .byte 4 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 72 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x33:0xa DW_TAG_formal_parameter + .byte 1 # DW_AT_location + .byte 85 + .byte 6 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 72 # DW_AT_type + .byte 3 # Abbrev [3] 0x3d:0xa DW_TAG_formal_parameter + .byte 1 # DW_AT_location + .byte 84 + .byte 7 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 72 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 4 # Abbrev [4] 0x48:0x4 DW_TAG_base_type + .byte 5 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_end0: + .section .debug_str_offsets,"",@progbits + .long 36 # Length of String Offsets Set + .short 5 + .short 0 +.Lstr_offsets_base0: + .section .debug_str,"MS",@progbits,1 +.Linfo_string0: + .asciz "clang version 15.0.0" # string offset=0 +.Linfo_string1: + .asciz "helper.cpp" # string offset=134 +.Linfo_string2: + .asciz "." # string offset=145 +.Linfo_string3: + .asciz "_Z6helperii" # string offset=191 +.Linfo_string4: + .asciz "helper" # string offset=203 +.Linfo_string5: + .asciz "int" # string offset=210 +.Linfo_string6: + .asciz "z_" # string offset=214 +.Linfo_string7: + .asciz "d_" # string offset=217 + .section .debug_str_offsets,"",@progbits + .long .Linfo_string0 + .long .Linfo_string1 + .long .Linfo_string2 + .long .Linfo_string3 + .long .Linfo_string4 + .long .Linfo_string5 + .long .Linfo_string6 + .long .Linfo_string7 + .section .debug_addr,"",@progbits + .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution +.Ldebug_addr_start0: + .short 5 # DWARF version number + .byte 8 # Address size + .byte 0 # Segment selector size +.Laddr_table_base0: + .quad .Lfunc_begin0 +.Ldebug_addr_end0: + .ident "clang version 15.0.0" + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/lldb/test/Shell/Commands/Inputs/dwarf5-low-pc-ranges-inlining.s b/bolt/test/X86/Inputs/dwarf5-call-pc-main.s similarity index 62% rename from lldb/test/Shell/Commands/Inputs/dwarf5-low-pc-ranges-inlining.s rename to bolt/test/X86/Inputs/dwarf5-call-pc-main.s index 9430fa205a3b96968bf6bbe3355802fdd769f2e4..7a91173effe7ead3c4df0c6521536b25b581aab3 100644 --- a/lldb/test/Shell/Commands/Inputs/dwarf5-low-pc-ranges-inlining.s +++ b/bolt/test/X86/Inputs/dwarf5-call-pc-main.s @@ -1,61 +1,78 @@ -# Manually modified to have DW_AT_ranges point to end list. 
-# int helper(int i) { -# return ++i; -# } -# -# int main(int argc, char *argv[]) { -# return helper(argc); +# -gdwarf-5 -g2 -O2 -S +# int helper(int z_, int d_); +# int x = 0; +# int y = 1; +# int main(int argc, char *argv[]) { +# if (argc == 5) { +# x = argc; +# y = argc + 3; +# return helper(x, y); +# } +# return 0; # } .text .file "main.cpp" - .section .text._Z6helperi,"ax",@progbits - .globl _Z6helperi # -- Begin function _Z6helperi - .p2align 4, 0x90 - .type _Z6helperi,@function -_Z6helperi: # @_Z6helperi -.Lfunc_begin0: - .file 0 "/home/test" "main.cpp" md5 0x3fc4870015f8bb98cd719b92f3dca96e - .loc 0 1 0 # main.cpp:1:0 - .cfi_startproc -# %bb.0: # %entry - #DEBUG_VALUE: helper:i <- $edi - # kill: def $edi killed $edi def $rdi - .loc 0 2 10 prologue_end # main.cpp:2:10 - leal 1(%rdi), %eax -.Ltmp0: - #DEBUG_VALUE: helper:i <- $eax - .loc 0 2 3 is_stmt 0 # main.cpp:2:3 - retq -.Ltmp1: -.Lfunc_end0: - .size _Z6helperi, .Lfunc_end0-_Z6helperi - .cfi_endproc - # -- End function - .section .text.main,"ax",@progbits + .file 0 "." "main.cpp" md5 0x988e72b4cad0891e25706b94310511c4 .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main -.Lfunc_begin1: - .loc 0 5 0 is_stmt 1 # main.cpp:5:0 +.Lfunc_begin0: + .loc 0 5 0 # main.cpp:5:0 .cfi_startproc # %bb.0: # %entry #DEBUG_VALUE: main:argc <- $edi #DEBUG_VALUE: main:argv <- $rsi - #DEBUG_VALUE: helper:i <- $edi - # kill: def $edi killed $edi def $rdi - .loc 0 2 10 prologue_end # main.cpp:2:10 - leal 1(%rdi), %eax + .loc 0 6 14 prologue_end # main.cpp:6:14 + cmpl $5, %edi +.Ltmp0: + .loc 0 6 9 is_stmt 0 # main.cpp:6:9 + jne .LBB0_1 +.Ltmp1: +# %bb.2: # %if.then + #DEBUG_VALUE: main:argc <- $edi + #DEBUG_VALUE: main:argv <- $rsi + .loc 0 7 9 is_stmt 1 # main.cpp:7:9 + movl $5, x(%rip) + .loc 0 8 9 # main.cpp:8:9 + movl $8, y(%rip) + .loc 0 9 14 # main.cpp:9:14 + movl $5, %edi .Ltmp2: - #DEBUG_VALUE: helper:i <- $eax - .loc 0 6 3 # main.cpp:6:3 - retq + #DEBUG_VALUE: main:argc <- [DW_OP_LLVM_entry_value 1] $edi + movl $8, %esi .Ltmp3: -.Lfunc_end1: - .size main, .Lfunc_end1-main + #DEBUG_VALUE: main:argv <- [DW_OP_LLVM_entry_value 1] $rsi + jmp _Z6helperii # TAILCALL +.Ltmp4: +.LBB0_1: # %return + #DEBUG_VALUE: main:argc <- $edi + #DEBUG_VALUE: main:argv <- $rsi + .loc 0 12 2 # main.cpp:12:2 + xorl %eax, %eax + retq +.Ltmp5: +.Lfunc_end0: + .size main, .Lfunc_end0-main .cfi_endproc # -- End function + .type x,@object # @x + .bss + .globl x + .p2align 2 +x: + .long 0 # 0x0 + .size x, 4 + + .type y,@object # @y + .data + .globl y + .p2align 2 +y: + .long 1 # 0x1 + .size y, 4 + .section .debug_loclists,"",@progbits .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length .Ldebug_list_header_start0: @@ -67,32 +84,44 @@ main: # @main .long .Ldebug_loc0-.Lloclists_table_base0 .long .Ldebug_loc1-.Lloclists_table_base0 .Ldebug_loc0: - .byte 1 # DW_LLE_base_addressx - .byte 0 # base address index .byte 4 # DW_LLE_offset_pair .uleb128 .Lfunc_begin0-.Lfunc_begin0 # starting offset - .uleb128 .Ltmp0-.Lfunc_begin0 # ending offset + .uleb128 .Ltmp2-.Lfunc_begin0 # ending offset .byte 1 # Loc expr size .byte 85 # super-register DW_OP_reg5 .byte 4 # DW_LLE_offset_pair - .uleb128 .Ltmp0-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp2-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp4-.Lfunc_begin0 # ending offset + .byte 4 # Loc expr size + .byte 163 # DW_OP_entry_value + .byte 1 # 1 + .byte 85 # super-register DW_OP_reg5 + .byte 159 # DW_OP_stack_value + .byte 4 # DW_LLE_offset_pair + .uleb128 .Ltmp4-.Lfunc_begin0 # 
starting offset .uleb128 .Lfunc_end0-.Lfunc_begin0 # ending offset .byte 1 # Loc expr size - .byte 80 # super-register DW_OP_reg0 + .byte 85 # super-register DW_OP_reg5 .byte 0 # DW_LLE_end_of_list .Ldebug_loc1: - .byte 1 # DW_LLE_base_addressx - .byte 1 # base address index .byte 4 # DW_LLE_offset_pair - .uleb128 .Lfunc_begin1-.Lfunc_begin1 # starting offset - .uleb128 .Ltmp2-.Lfunc_begin1 # ending offset + .uleb128 .Lfunc_begin0-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp3-.Lfunc_begin0 # ending offset .byte 1 # Loc expr size - .byte 85 # super-register DW_OP_reg5 + .byte 84 # DW_OP_reg4 + .byte 4 # DW_LLE_offset_pair + .uleb128 .Ltmp3-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp4-.Lfunc_begin0 # ending offset + .byte 4 # Loc expr size + .byte 163 # DW_OP_entry_value + .byte 1 # 1 + .byte 84 # DW_OP_reg4 + .byte 159 # DW_OP_stack_value .byte 4 # DW_LLE_offset_pair - .uleb128 .Ltmp2-.Lfunc_begin1 # starting offset - .uleb128 .Lfunc_end1-.Lfunc_begin1 # ending offset + .uleb128 .Ltmp4-.Lfunc_begin0 # starting offset + .uleb128 .Lfunc_end0-.Lfunc_begin0 # ending offset .byte 1 # Loc expr size - .byte 80 # super-register DW_OP_reg0 + .byte 84 # DW_OP_reg4 .byte 0 # DW_LLE_end_of_list .Ldebug_list_header_end0: .section .debug_abbrev,"",@progbits @@ -112,75 +141,33 @@ main: # @main .byte 27 # DW_AT_comp_dir .byte 37 # DW_FORM_strx1 .byte 17 # DW_AT_low_pc - .byte 1 # DW_FORM_addr - .byte 85 # DW_AT_ranges - .byte 35 # DW_FORM_rnglistx + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 .byte 115 # DW_AT_addr_base .byte 23 # DW_FORM_sec_offset - .byte 116 # DW_AT_rnglists_base - .byte 23 # DW_FORM_sec_offset .ascii "\214\001" # DW_AT_loclists_base .byte 23 # DW_FORM_sec_offset .byte 0 # EOM(1) .byte 0 # EOM(2) .byte 2 # Abbreviation Code - .byte 46 # DW_TAG_subprogram - .byte 1 # DW_CHILDREN_yes - .byte 17 # DW_AT_low_pc - .byte 27 # DW_FORM_addrx - .byte 18 # DW_AT_high_pc - .byte 6 # DW_FORM_data4 - .byte 64 # DW_AT_frame_base - .byte 24 # DW_FORM_exprloc - .byte 122 # DW_AT_call_all_calls - .byte 25 # DW_FORM_flag_present - .byte 49 # DW_AT_abstract_origin - .byte 19 # DW_FORM_ref4 - .byte 0 # EOM(1) - .byte 0 # EOM(2) - .byte 3 # Abbreviation Code - .byte 5 # DW_TAG_formal_parameter + .byte 52 # DW_TAG_variable .byte 0 # DW_CHILDREN_no - .byte 2 # DW_AT_location - .byte 34 # DW_FORM_loclistx - .byte 49 # DW_AT_abstract_origin - .byte 19 # DW_FORM_ref4 - .byte 0 # EOM(1) - .byte 0 # EOM(2) - .byte 4 # Abbreviation Code - .byte 46 # DW_TAG_subprogram - .byte 1 # DW_CHILDREN_yes - .byte 110 # DW_AT_linkage_name - .byte 37 # DW_FORM_strx1 .byte 3 # DW_AT_name .byte 37 # DW_FORM_strx1 - .byte 58 # DW_AT_decl_file - .byte 11 # DW_FORM_data1 - .byte 59 # DW_AT_decl_line - .byte 11 # DW_FORM_data1 .byte 73 # DW_AT_type .byte 19 # DW_FORM_ref4 .byte 63 # DW_AT_external .byte 25 # DW_FORM_flag_present - .byte 32 # DW_AT_inline - .byte 33 # DW_FORM_implicit_const - .byte 1 - .byte 0 # EOM(1) - .byte 0 # EOM(2) - .byte 5 # Abbreviation Code - .byte 5 # DW_TAG_formal_parameter - .byte 0 # DW_CHILDREN_no - .byte 3 # DW_AT_name - .byte 37 # DW_FORM_strx1 .byte 58 # DW_AT_decl_file .byte 11 # DW_FORM_data1 .byte 59 # DW_AT_decl_line .byte 11 # DW_FORM_data1 - .byte 73 # DW_AT_type - .byte 19 # DW_FORM_ref4 + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc .byte 0 # EOM(1) .byte 0 # EOM(2) - .byte 6 # Abbreviation Code + .byte 3 # Abbreviation Code .byte 36 # DW_TAG_base_type .byte 0 # DW_CHILDREN_no .byte 3 # DW_AT_name @@ -191,7 +178,7 @@ main: # @main .byte 11 # 
DW_FORM_data1 .byte 0 # EOM(1) .byte 0 # EOM(2) - .byte 7 # Abbreviation Code + .byte 4 # Abbreviation Code .byte 46 # DW_TAG_subprogram .byte 1 # DW_CHILDREN_yes .byte 17 # DW_AT_low_pc @@ -214,11 +201,11 @@ main: # @main .byte 25 # DW_FORM_flag_present .byte 0 # EOM(1) .byte 0 # EOM(2) - .byte 8 # Abbreviation Code + .byte 5 # Abbreviation Code .byte 5 # DW_TAG_formal_parameter .byte 0 # DW_CHILDREN_no .byte 2 # DW_AT_location - .byte 24 # DW_FORM_exprloc + .byte 34 # DW_FORM_loclistx .byte 3 # DW_AT_name .byte 37 # DW_FORM_strx1 .byte 58 # DW_AT_decl_file @@ -229,21 +216,50 @@ main: # @main .byte 19 # DW_FORM_ref4 .byte 0 # EOM(1) .byte 0 # EOM(2) - .byte 9 # Abbreviation Code - .byte 29 # DW_TAG_inlined_subroutine + .byte 6 # Abbreviation Code + .byte 72 # DW_TAG_call_site .byte 1 # DW_CHILDREN_yes - .byte 49 # DW_AT_abstract_origin + .byte 127 # DW_AT_call_origin .byte 19 # DW_FORM_ref4 - .byte 17 # DW_AT_low_pc + .ascii "\202\001" # DW_AT_call_tail_call + .byte 25 # DW_FORM_flag_present + .ascii "\201\001" # DW_AT_call_pc .byte 27 # DW_FORM_addrx - .byte 85 # DW_AT_ranges - .byte 35 # DW_FORM_rnglistx - .byte 88 # DW_AT_call_file - .byte 11 # DW_FORM_data1 - .byte 89 # DW_AT_call_line + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 7 # Abbreviation Code + .byte 73 # DW_TAG_call_site_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 126 # DW_AT_call_value + .byte 24 # DW_FORM_exprloc + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 8 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 110 # DW_AT_linkage_name + .byte 37 # DW_FORM_strx1 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file .byte 11 # DW_FORM_data1 - .byte 87 # DW_AT_call_column + .byte 59 # DW_AT_decl_line .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 60 # DW_AT_declaration + .byte 25 # DW_FORM_flag_present + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 9 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 .byte 0 # EOM(1) .byte 0 # EOM(2) .byte 10 # Abbreviation Code @@ -262,117 +278,103 @@ main: # @main .byte 1 # DWARF Unit Type .byte 8 # Address Size (in bytes) .long .debug_abbrev # Offset Into Abbrev. 
Section - .byte 1 # Abbrev [1] 0xc:0x93 DW_TAG_compile_unit + .byte 1 # Abbrev [1] 0xc:0x8b DW_TAG_compile_unit .byte 0 # DW_AT_producer .short 33 # DW_AT_language .byte 1 # DW_AT_name .long .Lstr_offsets_base0 # DW_AT_str_offsets_base .long .Lline_table_start0 # DW_AT_stmt_list .byte 2 # DW_AT_comp_dir - .quad 0 # DW_AT_low_pc - .byte 0 # DW_AT_ranges + .byte 2 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc .long .Laddr_table_base0 # DW_AT_addr_base - .long .Lrnglists_table_base0 # DW_AT_rnglists_base .long .Lloclists_table_base0 # DW_AT_loclists_base - .byte 2 # Abbrev [2] 0x2f:0x13 DW_TAG_subprogram - .byte 0 # DW_AT_low_pc - .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc - .byte 1 # DW_AT_frame_base - .byte 87 - # DW_AT_call_all_calls - .long 66 # DW_AT_abstract_origin - .byte 3 # Abbrev [3] 0x3b:0x6 DW_TAG_formal_parameter - .byte 0 # DW_AT_location - .long 75 # DW_AT_abstract_origin - .byte 0 # End Of Children Mark - .byte 4 # Abbrev [4] 0x42:0x12 DW_TAG_subprogram - .byte 3 # DW_AT_linkage_name - .byte 4 # DW_AT_name - .byte 0 # DW_AT_decl_file - .byte 1 # DW_AT_decl_line - .long 84 # DW_AT_type + .byte 2 # Abbrev [2] 0x27:0xb DW_TAG_variable + .byte 3 # DW_AT_name + .long 50 # DW_AT_type # DW_AT_external - # DW_AT_inline - .byte 5 # Abbrev [5] 0x4b:0x8 DW_TAG_formal_parameter - .byte 6 # DW_AT_name .byte 0 # DW_AT_decl_file - .byte 1 # DW_AT_decl_line - .long 84 # DW_AT_type - .byte 0 # End Of Children Mark - .byte 6 # Abbrev [6] 0x54:0x4 DW_TAG_base_type - .byte 5 # DW_AT_name + .byte 3 # DW_AT_decl_line + .byte 2 # DW_AT_location + .byte 161 + .byte 0 + .byte 3 # Abbrev [3] 0x32:0x4 DW_TAG_base_type + .byte 4 # DW_AT_name .byte 5 # DW_AT_encoding .byte 4 # DW_AT_byte_size - .byte 7 # Abbrev [7] 0x58:0x38 DW_TAG_subprogram - .byte 1 # DW_AT_low_pc - .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc + .byte 2 # Abbrev [2] 0x36:0xb DW_TAG_variable + .byte 5 # DW_AT_name + .long 50 # DW_AT_type + # DW_AT_external + .byte 0 # DW_AT_decl_file + .byte 4 # DW_AT_decl_line + .byte 2 # DW_AT_location + .byte 161 + .byte 1 + .byte 4 # Abbrev [4] 0x41:0x33 DW_TAG_subprogram + .byte 2 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc .byte 1 # DW_AT_frame_base .byte 87 # DW_AT_call_all_calls - .byte 7 # DW_AT_name + .byte 8 # DW_AT_name .byte 0 # DW_AT_decl_file .byte 5 # DW_AT_decl_line - .long 84 # DW_AT_type + .long 50 # DW_AT_type # DW_AT_external - .byte 8 # Abbrev [8] 0x67:0xa DW_TAG_formal_parameter - .byte 1 # DW_AT_location - .byte 85 - .byte 8 # DW_AT_name + .byte 5 # Abbrev [5] 0x50:0x9 DW_TAG_formal_parameter + .byte 0 # DW_AT_location + .byte 9 # DW_AT_name .byte 0 # DW_AT_decl_file .byte 5 # DW_AT_decl_line - .long 84 # DW_AT_type - .byte 8 # Abbrev [8] 0x71:0xa DW_TAG_formal_parameter + .long 50 # DW_AT_type + .byte 5 # Abbrev [5] 0x59:0x9 DW_TAG_formal_parameter .byte 1 # DW_AT_location - .byte 84 - .byte 9 # DW_AT_name + .byte 10 # DW_AT_name .byte 0 # DW_AT_decl_file .byte 5 # DW_AT_decl_line - .long 144 # DW_AT_type - .byte 9 # Abbrev [9] 0x7b:0x14 DW_TAG_inlined_subroutine - .long 66 # DW_AT_abstract_origin - .byte 2 # DW_AT_low_pc - .byte 1 # DW_AT_ranges - .byte 0 # DW_AT_call_file - .byte 6 # DW_AT_call_line - .byte 10 # DW_AT_call_column - .byte 3 # Abbrev [3] 0x88:0x6 DW_TAG_formal_parameter + .long 136 # DW_AT_type + .byte 6 # Abbrev [6] 0x62:0x11 DW_TAG_call_site + .long 116 # DW_AT_call_origin + # DW_AT_call_tail_call + .byte 3 # DW_AT_call_pc + .byte 7 # Abbrev [7] 0x68:0x5 DW_TAG_call_site_parameter + .byte 1 # DW_AT_location + 
.byte 84 + .byte 1 # DW_AT_call_value + .byte 56 + .byte 7 # Abbrev [7] 0x6d:0x5 DW_TAG_call_site_parameter .byte 1 # DW_AT_location - .long 75 # DW_AT_abstract_origin + .byte 85 + .byte 1 # DW_AT_call_value + .byte 53 .byte 0 # End Of Children Mark .byte 0 # End Of Children Mark - .byte 10 # Abbrev [10] 0x90:0x5 DW_TAG_pointer_type - .long 149 # DW_AT_type - .byte 10 # Abbrev [10] 0x95:0x5 DW_TAG_pointer_type - .long 154 # DW_AT_type - .byte 6 # Abbrev [6] 0x9a:0x4 DW_TAG_base_type - .byte 10 # DW_AT_name + .byte 8 # Abbrev [8] 0x74:0x14 DW_TAG_subprogram + .byte 6 # DW_AT_linkage_name + .byte 7 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 2 # DW_AT_decl_line + .long 50 # DW_AT_type + # DW_AT_declaration + # DW_AT_external + .byte 9 # Abbrev [9] 0x7d:0x5 DW_TAG_formal_parameter + .long 50 # DW_AT_type + .byte 9 # Abbrev [9] 0x82:0x5 DW_TAG_formal_parameter + .long 50 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 10 # Abbrev [10] 0x88:0x5 DW_TAG_pointer_type + .long 141 # DW_AT_type + .byte 10 # Abbrev [10] 0x8d:0x5 DW_TAG_pointer_type + .long 146 # DW_AT_type + .byte 3 # Abbrev [3] 0x92:0x4 DW_TAG_base_type + .byte 11 # DW_AT_name .byte 6 # DW_AT_encoding .byte 1 # DW_AT_byte_size .byte 0 # End Of Children Mark .Ldebug_info_end0: - .section .debug_rnglists,"",@progbits - .long .Ldebug_list_header_end1-.Ldebug_list_header_start1 # Length -.Ldebug_list_header_start1: - .short 5 # Version - .byte 8 # Address size - .byte 0 # Segment selector size - .long 2 # Offset entry count -.Lrnglists_table_base0: - .long .Ldebug_ranges0-.Lrnglists_table_base0 - .long .Ldebug_ranges1-.Lrnglists_table_base0 -.Ldebug_ranges0: - .byte 3 # DW_RLE_startx_length - .byte 0 # start index - .uleb128 .Lfunc_end0-.Lfunc_begin0 # length - .byte 3 # DW_RLE_startx_length - .byte 1 # start index - .uleb128 .Lfunc_end1-.Lfunc_begin1 # length - .byte 0 # DW_RLE_end_of_list - .Ldebug_ranges1: - .byte 0 # DW_RLE_end_of_list -.Ldebug_list_header_end1: .section .debug_str_offsets,"",@progbits - .long 48 # Length of String Offsets Set + .long 52 # Length of String Offsets Set .short 5 .short 0 .Lstr_offsets_base0: @@ -382,23 +384,25 @@ main: # @main .Linfo_string1: .asciz "main.cpp" # string offset=134 .Linfo_string2: - .asciz "/home/test" # string offset=143 + .asciz "." 
# string offset=143 .Linfo_string3: - .asciz "_Z6helperi" # string offset=181 + .asciz "x" # string offset=189 .Linfo_string4: - .asciz "helper" # string offset=192 + .asciz "int" # string offset=191 .Linfo_string5: - .asciz "int" # string offset=199 + .asciz "y" # string offset=195 .Linfo_string6: - .asciz "i" # string offset=203 + .asciz "_Z6helperii" # string offset=197 .Linfo_string7: - .asciz "main" # string offset=205 + .asciz "helper" # string offset=209 .Linfo_string8: - .asciz "argc" # string offset=210 + .asciz "main" # string offset=216 .Linfo_string9: - .asciz "argv" # string offset=215 + .asciz "argc" # string offset=221 .Linfo_string10: - .asciz "char" # string offset=220 + .asciz "argv" # string offset=226 +.Linfo_string11: + .asciz "char" # string offset=231 .section .debug_str_offsets,"",@progbits .long .Linfo_string0 .long .Linfo_string1 @@ -411,6 +415,7 @@ main: # @main .long .Linfo_string8 .long .Linfo_string9 .long .Linfo_string10 + .long .Linfo_string11 .section .debug_addr,"",@progbits .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution .Ldebug_addr_start0: @@ -418,9 +423,10 @@ main: # @main .byte 8 # Address size .byte 0 # Segment selector size .Laddr_table_base0: + .quad x + .quad y .quad .Lfunc_begin0 - .quad .Lfunc_begin1 - .quad 0 + .quad .Ltmp3 .Ldebug_addr_end0: .ident "clang version 15.0.0" .section ".note.GNU-stack","",@progbits diff --git a/bolt/test/X86/Inputs/dwarf5-return-pc-helper.s b/bolt/test/X86/Inputs/dwarf5-return-pc-helper.s new file mode 100644 index 0000000000000000000000000000000000000000..d01d2d2dcf501938bd2d6c956a959304ba76e487 --- /dev/null +++ b/bolt/test/X86/Inputs/dwarf5-return-pc-helper.s @@ -0,0 +1,188 @@ +# -gdwarf-5 -g2 -O2 -S +# int foo0(int argc) { +# return argc; +# } + + .text + .file "helper.cpp" + .globl _Z4foo0i # -- Begin function _Z4foo0i + .p2align 4, 0x90 + .type _Z4foo0i,@function +_Z4foo0i: # @_Z4foo0i +.Lfunc_begin0: + .file 0 "." 
"helper.cpp" md5 0x893b1fbf1f7f58c81b95cdb601b2f919 + .loc 0 1 0 # helper.cpp:1:0 + .cfi_startproc +# %bb.0: # %entry + #DEBUG_VALUE: foo0:argc <- $edi + movl %edi, %eax +.Ltmp0: + .loc 0 2 4 prologue_end # helper.cpp:2:4 + retq +.Ltmp1: +.Lfunc_end0: + .size _Z4foo0i, .Lfunc_end0-_Z4foo0i + .cfi_endproc + # -- End function + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .byte 37 # DW_FORM_strx1 + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 114 # DW_AT_str_offsets_base + .byte 23 # DW_FORM_sec_offset + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 37 # DW_FORM_strx1 + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 115 # DW_AT_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 122 # DW_AT_call_all_calls + .byte 25 # DW_FORM_flag_present + .byte 110 # DW_AT_linkage_name + .byte 37 # DW_FORM_strx1 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 4 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 5 # DWARF version number + .byte 1 # DWARF Unit Type + .byte 8 # Address Size (in bytes) + .long .debug_abbrev # Offset Into Abbrev. 
Section + .byte 1 # Abbrev [1] 0xc:0x37 DW_TAG_compile_unit + .byte 0 # DW_AT_producer + .short 33 # DW_AT_language + .byte 1 # DW_AT_name + .long .Lstr_offsets_base0 # DW_AT_str_offsets_base + .long .Lline_table_start0 # DW_AT_stmt_list + .byte 2 # DW_AT_comp_dir + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_addr_base + .byte 2 # Abbrev [2] 0x23:0x1b DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 87 + # DW_AT_call_all_calls + .byte 3 # DW_AT_linkage_name + .byte 4 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 62 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x33:0xa DW_TAG_formal_parameter + .byte 1 # DW_AT_location + .byte 85 + .byte 6 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 62 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 4 # Abbrev [4] 0x3e:0x4 DW_TAG_base_type + .byte 5 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_end0: + .section .debug_str_offsets,"",@progbits + .long 32 # Length of String Offsets Set + .short 5 + .short 0 +.Lstr_offsets_base0: + .section .debug_str,"MS",@progbits,1 +.Linfo_string0: + .asciz "clang version 15.0.0" # string offset=0 +.Linfo_string1: + .asciz "helper.cpp" # string offset=134 +.Linfo_string2: + .asciz "." # string offset=145 +.Linfo_string3: + .asciz "_Z4foo0i" # string offset=193 +.Linfo_string4: + .asciz "foo0" # string offset=202 +.Linfo_string5: + .asciz "int" # string offset=207 +.Linfo_string6: + .asciz "argc" # string offset=211 + .section .debug_str_offsets,"",@progbits + .long .Linfo_string0 + .long .Linfo_string1 + .long .Linfo_string2 + .long .Linfo_string3 + .long .Linfo_string4 + .long .Linfo_string5 + .long .Linfo_string6 + .section .debug_addr,"",@progbits + .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution +.Ldebug_addr_start0: + .short 5 # DWARF version number + .byte 8 # Address size + .byte 0 # Segment selector size +.Laddr_table_base0: + .quad .Lfunc_begin0 +.Ldebug_addr_end0: + .ident "clang version 15.0.0" + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/bolt/test/X86/Inputs/dwarf5-return-pc-main.s b/bolt/test/X86/Inputs/dwarf5-return-pc-main.s new file mode 100644 index 0000000000000000000000000000000000000000..010d12c5512899e12592bc8400122c20da06cf84 --- /dev/null +++ b/bolt/test/X86/Inputs/dwarf5-return-pc-main.s @@ -0,0 +1,545 @@ +# -gdwarf-5 -g2 -O2 -S +# int fooVar0; +# void use() { +# ++fooVar0; +# } +# +# extern int foo0(int); +# int main(int argc, char *argv[]) { +# if ( argc == 5) { +# int x = argc; +# use(); +# return x + foo0(x); +# } +# return 0; +# } + + .text + .file "main.cpp" + .file 0 "." 
"main.cpp" md5 0x02a00a8be50e50d669847461ae3d9851 + .globl _Z3usev # -- Begin function _Z3usev + .p2align 4, 0x90 + .type _Z3usev,@function +_Z3usev: # @_Z3usev +.Lfunc_begin0: + .loc 0 2 0 # main.cpp:2:0 + .cfi_startproc +# %bb.0: # %entry + .loc 0 3 3 prologue_end # main.cpp:3:3 + incl fooVar0(%rip) + .loc 0 4 1 # main.cpp:4:1 + retq +.Ltmp0: +.Lfunc_end0: + .size _Z3usev, .Lfunc_end0-_Z3usev + .cfi_endproc + # -- End function + .globl main # -- Begin function main + .p2align 4, 0x90 + .type main,@function +main: # @main +.Lfunc_begin1: + .loc 0 7 0 # main.cpp:7:0 + .cfi_startproc +# %bb.0: # %entry + #DEBUG_VALUE: main:argc <- $edi + #DEBUG_VALUE: main:argv <- $rsi + xorl %eax, %eax +.Ltmp1: + .loc 0 8 14 prologue_end # main.cpp:8:14 + cmpl $5, %edi +.Ltmp2: + .loc 0 8 9 is_stmt 0 # main.cpp:8:9 + jne .LBB1_2 +.Ltmp3: +# %bb.1: # %if.then + #DEBUG_VALUE: main:argc <- $edi + #DEBUG_VALUE: main:argv <- $rsi + pushq %rax + .cfi_def_cfa_offset 16 +.Ltmp4: + #DEBUG_VALUE: x <- $edi + .loc 0 3 3 is_stmt 1 # main.cpp:3:3 + incl fooVar0(%rip) +.Ltmp5: + .loc 0 11 16 # main.cpp:11:16 + movl $5, %edi +.Ltmp6: + #DEBUG_VALUE: main:argc <- [DW_OP_LLVM_entry_value 1] $edi + callq _Z4foo0i +.Ltmp7: + #DEBUG_VALUE: main:argv <- [DW_OP_LLVM_entry_value 1] $rsi + .loc 0 11 14 is_stmt 0 # main.cpp:11:14 + addl $5, %eax + addq $8, %rsp +.Ltmp8: + .cfi_def_cfa_offset 8 +.LBB1_2: # %return + #DEBUG_VALUE: main:argc <- [DW_OP_LLVM_entry_value 1] $edi + #DEBUG_VALUE: main:argv <- [DW_OP_LLVM_entry_value 1] $rsi + .loc 0 14 1 is_stmt 1 # main.cpp:14:1 + retq +.Ltmp9: +.Lfunc_end1: + .size main, .Lfunc_end1-main + .cfi_endproc + # -- End function + .type fooVar0,@object # @fooVar0 + .bss + .globl fooVar0 + .p2align 2 +fooVar0: + .long 0 # 0x0 + .size fooVar0, 4 + + .section .debug_loclists,"",@progbits + .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length +.Ldebug_list_header_start0: + .short 5 # Version + .byte 8 # Address size + .byte 0 # Segment selector size + .long 3 # Offset entry count +.Lloclists_table_base0: + .long .Ldebug_loc0-.Lloclists_table_base0 + .long .Ldebug_loc1-.Lloclists_table_base0 + .long .Ldebug_loc2-.Lloclists_table_base0 +.Ldebug_loc0: + .byte 4 # DW_LLE_offset_pair + .uleb128 .Lfunc_begin1-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp6-.Lfunc_begin0 # ending offset + .byte 1 # Loc expr size + .byte 85 # super-register DW_OP_reg5 + .byte 4 # DW_LLE_offset_pair + .uleb128 .Ltmp6-.Lfunc_begin0 # starting offset + .uleb128 .Lfunc_end1-.Lfunc_begin0 # ending offset + .byte 4 # Loc expr size + .byte 163 # DW_OP_entry_value + .byte 1 # 1 + .byte 85 # super-register DW_OP_reg5 + .byte 159 # DW_OP_stack_value + .byte 0 # DW_LLE_end_of_list +.Ldebug_loc1: + .byte 4 # DW_LLE_offset_pair + .uleb128 .Lfunc_begin1-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp7-.Lfunc_begin0 # ending offset + .byte 1 # Loc expr size + .byte 84 # DW_OP_reg4 + .byte 4 # DW_LLE_offset_pair + .uleb128 .Ltmp7-.Lfunc_begin0 # starting offset + .uleb128 .Lfunc_end1-.Lfunc_begin0 # ending offset + .byte 4 # Loc expr size + .byte 163 # DW_OP_entry_value + .byte 1 # 1 + .byte 84 # DW_OP_reg4 + .byte 159 # DW_OP_stack_value + .byte 0 # DW_LLE_end_of_list +.Ldebug_loc2: + .byte 4 # DW_LLE_offset_pair + .uleb128 .Ltmp4-.Lfunc_begin0 # starting offset + .uleb128 .Ltmp6-.Lfunc_begin0 # ending offset + .byte 1 # Loc expr size + .byte 85 # super-register DW_OP_reg5 + .byte 0 # DW_LLE_end_of_list +.Ldebug_list_header_end0: + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # 
DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .byte 37 # DW_FORM_strx1 + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 114 # DW_AT_str_offsets_base + .byte 23 # DW_FORM_sec_offset + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 37 # DW_FORM_strx1 + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 115 # DW_AT_addr_base + .byte 23 # DW_FORM_sec_offset + .ascii "\214\001" # DW_AT_loclists_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 52 # DW_TAG_variable + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 4 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 0 # DW_CHILDREN_no + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 122 # DW_AT_call_all_calls + .byte 25 # DW_FORM_flag_present + .byte 49 # DW_AT_abstract_origin + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 5 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 0 # DW_CHILDREN_no + .byte 110 # DW_AT_linkage_name + .byte 37 # DW_FORM_strx1 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 32 # DW_AT_inline + .byte 33 # DW_FORM_implicit_const + .byte 1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 6 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 122 # DW_AT_call_all_calls + .byte 25 # DW_FORM_flag_present + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 7 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 34 # DW_FORM_loclistx + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 8 # Abbreviation Code + .byte 11 # DW_TAG_lexical_block + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + 
.byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 9 # Abbreviation Code + .byte 52 # DW_TAG_variable + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 34 # DW_FORM_loclistx + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 10 # Abbreviation Code + .byte 29 # DW_TAG_inlined_subroutine + .byte 0 # DW_CHILDREN_no + .byte 49 # DW_AT_abstract_origin + .byte 19 # DW_FORM_ref4 + .byte 17 # DW_AT_low_pc + .byte 27 # DW_FORM_addrx + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 88 # DW_AT_call_file + .byte 11 # DW_FORM_data1 + .byte 89 # DW_AT_call_line + .byte 11 # DW_FORM_data1 + .byte 87 # DW_AT_call_column + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 11 # Abbreviation Code + .byte 72 # DW_TAG_call_site + .byte 1 # DW_CHILDREN_yes + .byte 127 # DW_AT_call_origin + .byte 19 # DW_FORM_ref4 + .byte 125 # DW_AT_call_return_pc + .byte 27 # DW_FORM_addrx + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 12 # Abbreviation Code + .byte 73 # DW_TAG_call_site_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 126 # DW_AT_call_value + .byte 24 # DW_FORM_exprloc + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 13 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 110 # DW_AT_linkage_name + .byte 37 # DW_FORM_strx1 + .byte 3 # DW_AT_name + .byte 37 # DW_FORM_strx1 + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 60 # DW_AT_declaration + .byte 25 # DW_FORM_flag_present + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 14 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 15 # Abbreviation Code + .byte 15 # DW_TAG_pointer_type + .byte 0 # DW_CHILDREN_no + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 5 # DWARF version number + .byte 1 # DWARF Unit Type + .byte 8 # Address Size (in bytes) + .long .debug_abbrev # Offset Into Abbrev. 
Section + .byte 1 # Abbrev [1] 0xc:0xa4 DW_TAG_compile_unit + .byte 0 # DW_AT_producer + .short 33 # DW_AT_language + .byte 1 # DW_AT_name + .long .Lstr_offsets_base0 # DW_AT_str_offsets_base + .long .Lline_table_start0 # DW_AT_stmt_list + .byte 2 # DW_AT_comp_dir + .byte 1 # DW_AT_low_pc + .long .Lfunc_end1-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_addr_base + .long .Lloclists_table_base0 # DW_AT_loclists_base + .byte 2 # Abbrev [2] 0x27:0xb DW_TAG_variable + .byte 3 # DW_AT_name + .long 50 # DW_AT_type + # DW_AT_external + .byte 0 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .byte 2 # DW_AT_location + .byte 161 + .byte 0 + .byte 3 # Abbrev [3] 0x32:0x4 DW_TAG_base_type + .byte 4 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 4 # Abbrev [4] 0x36:0xc DW_TAG_subprogram + .byte 1 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 87 + # DW_AT_call_all_calls + .long 66 # DW_AT_abstract_origin + .byte 5 # Abbrev [5] 0x42:0x5 DW_TAG_subprogram + .byte 5 # DW_AT_linkage_name + .byte 6 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 2 # DW_AT_decl_line + # DW_AT_external + # DW_AT_inline + .byte 6 # Abbrev [6] 0x47:0x4b DW_TAG_subprogram + .byte 2 # DW_AT_low_pc + .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 87 + # DW_AT_call_all_calls + .byte 9 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 7 # DW_AT_decl_line + .long 50 # DW_AT_type + # DW_AT_external + .byte 7 # Abbrev [7] 0x56:0x9 DW_TAG_formal_parameter + .byte 0 # DW_AT_location + .byte 10 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 7 # DW_AT_decl_line + .long 50 # DW_AT_type + .byte 7 # Abbrev [7] 0x5f:0x9 DW_TAG_formal_parameter + .byte 1 # DW_AT_location + .byte 11 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 7 # DW_AT_decl_line + .long 161 # DW_AT_type + .byte 8 # Abbrev [8] 0x68:0x1d DW_TAG_lexical_block + .byte 3 # DW_AT_low_pc + .long .Ltmp8-.Ltmp4 # DW_AT_high_pc + .byte 9 # Abbrev [9] 0x6e:0x9 DW_TAG_variable + .byte 2 # DW_AT_location + .byte 13 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 9 # DW_AT_decl_line + .long 50 # DW_AT_type + .byte 10 # Abbrev [10] 0x77:0xd DW_TAG_inlined_subroutine + .long 66 # DW_AT_abstract_origin + .byte 3 # DW_AT_low_pc + .long .Ltmp5-.Ltmp4 # DW_AT_high_pc + .byte 0 # DW_AT_call_file + .byte 10 # DW_AT_call_line + .byte 5 # DW_AT_call_column + .byte 0 # End Of Children Mark + .byte 11 # Abbrev [11] 0x85:0xc DW_TAG_call_site + .long 146 # DW_AT_call_origin + .byte 4 # DW_AT_call_return_pc + .byte 12 # Abbrev [12] 0x8b:0x5 DW_TAG_call_site_parameter + .byte 1 # DW_AT_location + .byte 85 + .byte 1 # DW_AT_call_value + .byte 53 + .byte 0 # End Of Children Mark + .byte 0 # End Of Children Mark + .byte 13 # Abbrev [13] 0x92:0xf DW_TAG_subprogram + .byte 7 # DW_AT_linkage_name + .byte 8 # DW_AT_name + .byte 0 # DW_AT_decl_file + .byte 6 # DW_AT_decl_line + .long 50 # DW_AT_type + # DW_AT_declaration + # DW_AT_external + .byte 14 # Abbrev [14] 0x9b:0x5 DW_TAG_formal_parameter + .long 50 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 15 # Abbrev [15] 0xa1:0x5 DW_TAG_pointer_type + .long 166 # DW_AT_type + .byte 15 # Abbrev [15] 0xa6:0x5 DW_TAG_pointer_type + .long 171 # DW_AT_type + .byte 3 # Abbrev [3] 0xab:0x4 DW_TAG_base_type + .byte 12 # DW_AT_name + .byte 6 # DW_AT_encoding + .byte 1 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_end0: + .section .debug_str_offsets,"",@progbits + .long 60 # Length of String Offsets Set 
+ .short 5 + .short 0 +.Lstr_offsets_base0: + .section .debug_str,"MS",@progbits,1 +.Linfo_string0: + .asciz "clang version 15.0.0" # string offset=0 +.Linfo_string1: + .asciz "main.cpp" # string offset=134 +.Linfo_string2: + .asciz "." # string offset=143 +.Linfo_string3: + .asciz "fooVar0" # string offset=191 +.Linfo_string4: + .asciz "int" # string offset=199 +.Linfo_string5: + .asciz "_Z3usev" # string offset=203 +.Linfo_string6: + .asciz "use" # string offset=211 +.Linfo_string7: + .asciz "_Z4foo0i" # string offset=215 +.Linfo_string8: + .asciz "foo0" # string offset=224 +.Linfo_string9: + .asciz "main" # string offset=229 +.Linfo_string10: + .asciz "argc" # string offset=234 +.Linfo_string11: + .asciz "argv" # string offset=239 +.Linfo_string12: + .asciz "char" # string offset=244 +.Linfo_string13: + .asciz "x" # string offset=249 + .section .debug_str_offsets,"",@progbits + .long .Linfo_string0 + .long .Linfo_string1 + .long .Linfo_string2 + .long .Linfo_string3 + .long .Linfo_string4 + .long .Linfo_string5 + .long .Linfo_string6 + .long .Linfo_string7 + .long .Linfo_string8 + .long .Linfo_string9 + .long .Linfo_string10 + .long .Linfo_string11 + .long .Linfo_string12 + .long .Linfo_string13 + .section .debug_addr,"",@progbits + .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution +.Ldebug_addr_start0: + .short 5 # DWARF version number + .byte 8 # Address size + .byte 0 # Segment selector size +.Laddr_table_base0: + .quad fooVar0 + .quad .Lfunc_begin0 + .quad .Lfunc_begin1 + .quad .Ltmp4 + .quad .Ltmp7 +.Ldebug_addr_end0: + .ident "clang version 15.0.0" + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/bolt/test/X86/bug-reorder-bb-jrcxz.s b/bolt/test/X86/bug-reorder-bb-jrcxz.s index 27cd38bc83069d804c79388cd0e28b1d04711bec..13611119beaf0745ca68e67202401b265d650de5 100644 --- a/bolt/test/X86/bug-reorder-bb-jrcxz.s +++ b/bolt/test/X86/bug-reorder-bb-jrcxz.s @@ -16,7 +16,7 @@ # RUN: llvm-bolt %t.exe -o %t.bolted --data %t.fdata \ # RUN: --reorder-blocks=ext-tsp --reorder-functions=hfsort \ -# RUN: --split-functions=2 --split-all-cold --split-eh --dyno-stats \ +# RUN: --split-functions --split-all-cold --split-eh --dyno-stats \ # RUN: --print-finalized 2>&1 | FileCheck %s # CHECK-NOT: value of -2105 is too large for field of 1 byte. 
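For context on the two dwarf5-*-pc tests that follow: they exercise the `DW_TAG_call_site` handling added to `bolt/lib/Rewrite/DWARFRewriter.cpp` earlier in this patch. A condensed sketch of that logic, built only from the helpers the hunk itself calls (simplified, not a drop-in implementation):

```cpp
// Sketch of the DW_TAG_call_site handling added in DWARFRewriter.cpp:
// resolve the attribute's input address, translate it to the address the
// instruction has after BOLT reorders blocks, and patch the DW_FORM_addrx
// index that points into .debug_addr.
if (Optional<AttrInfo> AttrVal =
        findAttributeInfo(DIE, dwarf::DW_AT_call_return_pc)) {
  const uint64_t InputAddress = *AttrVal->V.getAsAddress();
  const BinaryFunction *Function =
      BC.getBinaryFunctionContainingAddress(InputAddress);
  // Maps the pre-BOLT address to its post-layout location.
  const uint64_t OutputAddress =
      Function->translateInputToOutputAddress(InputAddress);
  const uint32_t Index = AddrWriter->getIndexFromAddress(OutputAddress, Unit);
  if (AttrVal->V.getForm() == dwarf::DW_FORM_addrx)
    DebugInfoPatcher.addUDataPatch(AttrVal->Offset, Index, AttrVal->Size);
}
```

The same flow is applied to `DW_AT_call_pc` for tail calls, which is exactly what the tests below verify against llvm-dwarfdump and llvm-objdump output.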
diff --git a/bolt/test/X86/dwarf5-call-pc.test b/bolt/test/X86/dwarf5-call-pc.test new file mode 100644 index 0000000000000000000000000000000000000000..ec03a7bf8ad4add5f259e739a88101bd91af9469 --- /dev/null +++ b/bolt/test/X86/dwarf5-call-pc.test @@ -0,0 +1,31 @@ +# REQUIRES: system-linux + +# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf5-call-pc-main.s -o %tmain.o +# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf5-call-pc-helper.s -o %thelper.o +# RUN: %clang %cflags -dwarf-5 %tmain.o %thelper.o -o %t.exe -Wl,-q +# RUN: llvm-bolt %t.exe -o %t.exe.bolt --update-debug-sections -reorder-blocks=reverse +# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.exe > %tmain.txt +# RUN: llvm-objdump %t.exe --disassemble >> %tmain.txt +# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.exe.bolt > %tmainbolt.txt +# RUN: llvm-objdump %t.exe.bolt --disassemble >> %tmainbolt.txt +# RUN: cat %tmain.txt | FileCheck --check-prefix=PRECHECK %s +# RUN: cat %tmainbolt.txt | FileCheck --check-prefix=POSTCHECK %s + +# Test checks that DW_AT_call_pc address points to a correct address for jmp instruction. + +# PRECHECK: DW_TAG_call_site [6] +# PRECHECK-NEXT: DW_AT_call_origin [DW_FORM_ref4] +# PRECHECK-NEXT: DW_AT_call_tail_call +# PRECHECK-NEXT: DW_AT_call_pc [DW_FORM_addrx] +# PRECHECK-SAME: address = 0x[[#%x,ADDR:]]) +# PRECHECK: [[#ADDR]]: +# PRECHECK-SAME: jmp + +# POSTCHECK: DW_TAG_call_site [6] +# POSTCHECK-NEXT: DW_AT_call_origin [DW_FORM_ref4] +# POSTCHECK-NEXT: DW_AT_call_tail_call +# POSTCHECK-NEXT: DW_AT_call_pc [DW_FORM_addrx] +# POSTCHECK-SAME: address = 0x[[#%x,ADDR:]]) +# POSTCHECK:
: +# POSTCHECK: [[#ADDR]]: +# POSTCHECK-SAME: jmp diff --git a/bolt/test/X86/dwarf5-return-pc.test b/bolt/test/X86/dwarf5-return-pc.test new file mode 100644 index 0000000000000000000000000000000000000000..987a9fa8cefadef545a4044e181e2b8d3e90f663 --- /dev/null +++ b/bolt/test/X86/dwarf5-return-pc.test @@ -0,0 +1,29 @@ +# REQUIRES: system-linux + +# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf5-return-pc-main.s -o %tmain.o +# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf5-return-pc-helper.s -o %thelper.o +# RUN: %clang %cflags -dwarf-5 %tmain.o %thelper.o -o %t.exe -Wl,-q +# RUN: llvm-bolt %t.exe -o %t.exe.bolt --update-debug-sections -reorder-blocks=reverse +# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.exe > %tmain.txt +# RUN: llvm-objdump %t.exe --disassemble >> %tmain.txt +# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.exe.bolt > %tmainbolt.txt +# RUN: llvm-objdump %t.exe.bolt --disassemble >> %tmainbolt.txt +# RUN: cat %tmain.txt | FileCheck --check-prefix=PRECHECK %s +# RUN: cat %tmainbolt.txt | FileCheck --check-prefix=POSTCHECK %s + +# Test checks that DW_AT_call_return_pc points to an address after the callq instruction. + +# PRECHECK: DW_TAG_call_site [11] +# PRECHECK-NEXT: DW_AT_call_origin [DW_FORM_ref4] +# PRECHECK-NEXT: DW_AT_call_return_pc [DW_FORM_addrx] +# PRECHECK-SAME: address = 0x[[#%x,ADDR:]]) +# PRECHECK: callq +# PRECHECK-NEXT: [[#ADDR]]: + +# POSTCHECK: DW_TAG_call_site [11] +# POSTCHECK-NEXT: DW_AT_call_origin [DW_FORM_ref4] +# POSTCHECK-NEXT: DW_AT_call_return_pc [DW_FORM_addrx] +# POSTCHECK-SAME: address = 0x[[#%x,ADDR:]]) +# POSTCHECK:
: +# POSTCHECK: callq +# POSTCHECK-NEXT: [[#ADDR]]: diff --git a/bolt/test/X86/jump-table-icp.test b/bolt/test/X86/jump-table-icp.test index 0deeba459cda97a383bb4d7b6ae434d7b1fc4c8c..7ed77d3c0cdf496899d762f1d80e7c5f5bf97a1c 100644 --- a/bolt/test/X86/jump-table-icp.test +++ b/bolt/test/X86/jump-table-icp.test @@ -5,7 +5,7 @@ RUN: llvm-strip --strip-unneeded %t.o RUN: %clang %cflags -no-pie %t.o -o %t.exe -Wl,-q RUN: (llvm-bolt %t.exe --data %t.fdata -o %t --relocs \ -RUN: --reorder-blocks=cache --split-functions=3 --split-all-cold \ +RUN: --reorder-blocks=cache --split-functions --split-all-cold \ RUN: --use-gnu-stack --dyno-stats --indirect-call-promotion=jump-tables \ RUN: --print-icp -v=0 \ RUN: --icp-jt-remaining-percent-threshold=10 \ diff --git a/bolt/test/X86/shared_object_entry.s b/bolt/test/X86/shared_object_entry.s index ce72f0bf11101198cdd86cb603dba9edc1c68bf9..eeefbd8ee4e6f929535a07f829b03e117d48eeff 100644 --- a/bolt/test/X86/shared_object_entry.s +++ b/bolt/test/X86/shared_object_entry.s @@ -1,7 +1,7 @@ # RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o # RUN: ld.lld %t.o -o %t.so --shared --entry=func1.cold.1 --emit-relocs # RUN: llvm-bolt -relocs %t.so -o %t -reorder-functions=hfsort+ \ -# RUN: -split-functions=3 -reorder-blocks=ext-tsp -split-all-cold \ +# RUN: -split-functions -reorder-blocks=ext-tsp -split-all-cold \ # RUN: -dyno-stats -icf=1 -use-gnu-stack # Check that an entry point is a cold symbol diff --git a/bolt/test/X86/unreachable.test b/bolt/test/X86/unreachable.test index b69e125af17ed2c65e0c36cbc41bba5c9421d3c1..162aafc8cbda0f56af8cc486d83268f90ab5a2dc 100644 --- a/bolt/test/X86/unreachable.test +++ b/bolt/test/X86/unreachable.test @@ -5,7 +5,7 @@ RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown \ RUN: %p/Inputs/unreachable.s -o %t.o RUN: %clangxx %cxxflags -no-pie %t.o -o %t.exe %t.so RUN: llvm-bolt %t.exe -o %t \ -RUN: -reorder-blocks=none -split-functions=1 -eliminate-unreachable \ +RUN: -reorder-blocks=none -split-functions -eliminate-unreachable \ RUN: -funcs=foo -use-gnu-stack -print-cfg -print-finalized \ RUN: | FileCheck %s --check-prefix=BOLT RUN: llvm-objdump -d %t --print-imm-hex --disassemble-symbols=foo \ diff --git a/bolt/test/runtime/X86/exceptions-instrumentation.test b/bolt/test/runtime/X86/exceptions-instrumentation.test index db26bdefc54c380989da7bbe54b60de7956943bb..3af2f5a23a63cc7ac0ce16f1e5b9e0d06d22c4d5 100644 --- a/bolt/test/runtime/X86/exceptions-instrumentation.test +++ b/bolt/test/runtime/X86/exceptions-instrumentation.test @@ -9,7 +9,7 @@ RUN: %t.exc arg1 arg2 arg3 RUN: llvm-bolt %t_exc_split -o %t.exc.bolted --data %t.fdata \ RUN: --reorder-blocks=ext-tsp --reorder-functions=hfsort+ \ -RUN: --split-functions=3 --split-eh=1 \ +RUN: --split-functions --split-eh=1 \ RUN: | FileCheck --check-prefix=EXCEPTIONS %s EXCEPTIONS-NOT: invalid (possibly stale) profile diff --git a/bolt/test/runtime/X86/pie-exceptions-split.test b/bolt/test/runtime/X86/pie-exceptions-split.test index f3f42fa950a53ef3d08449da9d01419cada66360..499e9016602a30a9a046a9f8b1d147446351d162 100644 --- a/bolt/test/runtime/X86/pie-exceptions-split.test +++ b/bolt/test/runtime/X86/pie-exceptions-split.test @@ -10,7 +10,7 @@ RUN: llvm-bolt %t -o %t.instr --instrument --instrumentation-file=%t.fdata RUN: %t.instr RUN: llvm-bolt %t -o %t.bolt --data %t.fdata --reorder-blocks=ext-tsp \ -RUN: --split-functions=1 --split-eh --print-after-lowering \ +RUN: --split-functions --split-eh --print-after-lowering \ RUN: --print-only=main 2>&1 | FileCheck %s 
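The RUN-line updates above and in the neighboring tests drop the numeric argument from `-split-functions=N`; the tests now pass it as a value-less flag. As an illustrative sketch only (not BOLT's actual option declaration), a boolean `llvm::cl` option of that shape looks roughly like this, buildable against LLVMSupport:

```c++
#include "llvm/Support/CommandLine.h"

// A value-less flag: "--split-functions" turns it on; no "=2"/"=3" argument
// is accepted. Name and description here are placeholders for illustration.
static llvm::cl::opt<bool>
    SplitFunctions("split-functions",
                   llvm::cl::desc("Split functions into hot/cold parts"),
                   llvm::cl::init(false));

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  return SplitFunctions ? 0 : 1;
}
```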
## All calls to printf() should be from exception handling code that was @@ -26,4 +26,3 @@ RUN: %t.bolt arg1 arg2 arg3 2>&1 | FileCheck --check-prefix=CHECK-BOLTED %s CHECK-BOLTED: catch 2 CHECK-BOLTED-NEXT: catch 1 - diff --git a/bolt/test/runtime/meta-merge-fdata.test b/bolt/test/runtime/meta-merge-fdata.test index 57a8a54dcc26eb4e780105a9ee39731050a2a63a..374d87af72b5c42efaa424dfdb917a68a6c0ec7e 100644 --- a/bolt/test/runtime/meta-merge-fdata.test +++ b/bolt/test/runtime/meta-merge-fdata.test @@ -22,7 +22,7 @@ CHECK-FDATA: 0 [unknown] 0 1 _start 0 0 1 # Check that BOLT works with this profile RUN: llvm-bolt merge-fdata -o %t.bolt --data %t.fdata1 \ RUN: --reorder-blocks=ext-tsp --reorder-functions=hfsort+ \ -RUN: --split-functions=3 \ +RUN: --split-functions \ RUN: | FileCheck %s --check-prefix=CHECK-BOLT1 CHECK-BOLT1-NOT: invalid (possibly stale) profile @@ -44,7 +44,7 @@ RUN: cmp %t.fdata.base %t.fdata.inst # Optimize using merged fdata RUN: llvm-bolt merge-fdata -o %t.opt --data %t.fdata.base \ RUN: --reorder-blocks=ext-tsp --reorder-functions=hfsort+ \ -RUN: --split-functions=3 \ +RUN: --split-functions \ RUN: | FileCheck %s --check-prefix=CHECK-BOLT2 CHECK-BOLT2-NOT: invalid (possibly stale) profile diff --git a/bolt/tools/merge-fdata/merge-fdata.cpp b/bolt/tools/merge-fdata/merge-fdata.cpp index 3cb6c910a8b54bdbad332e569ed3e0090cd95096..4505c3f20a72a6c05740f3261345b31132d9e6d3 100644 --- a/bolt/tools/merge-fdata/merge-fdata.cpp +++ b/bolt/tools/merge-fdata/merge-fdata.cpp @@ -398,14 +398,14 @@ int main(int argc, char **argv) { BinaryProfile MergedProfile; MergedProfile.Header = MergedHeader; MergedProfile.Functions.resize(MergedBFs.size()); - std::transform( - MergedBFs.begin(), MergedBFs.end(), MergedProfile.Functions.begin(), + llvm::transform( + MergedBFs, MergedProfile.Functions.begin(), [](StringMapEntry &V) { return V.second; }); // For consistency, sort functions by their IDs. - std::sort(MergedProfile.Functions.begin(), MergedProfile.Functions.end(), - [](const BinaryFunctionProfile &A, - const BinaryFunctionProfile &B) { return A.Id < B.Id; }); + llvm::sort(MergedProfile.Functions, + [](const BinaryFunctionProfile &A, + const BinaryFunctionProfile &B) { return A.Id < B.Id; }); YamlOut << MergedProfile; } @@ -435,9 +435,8 @@ int main(int argc, char **argv) { CountFuncType CountFunc = (opts::PrintFunctionList == opts::ST_EXEC_COUNT) ? ExecCountFunc : BranchCountFunc; - std::transform(MergedBFs.begin(), MergedBFs.end(), FunctionList.begin(), - CountFunc); - std::stable_sort(FunctionList.rbegin(), FunctionList.rend()); + llvm::transform(MergedBFs, FunctionList.begin(), CountFunc); + llvm::stable_sort(reverse(FunctionList)); errs() << "Functions sorted by " << (opts::PrintFunctionList == opts::ST_EXEC_COUNT ? 
"execution" : "total branch") diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index f7ffcdbc692b865fff5c1377ab561b5f944c5396..4007363505ed61a1d908d83c1322388a2d75d359 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -623,5 +623,40 @@ void exportReplacements(const llvm::StringRef MainFilePath, YAML << TUD; } +NamesAndOptions +getAllChecksAndOptions(bool AllowEnablingAnalyzerAlphaCheckers) { + NamesAndOptions Result; + ClangTidyOptions Opts; + Opts.Checks = "*"; + clang::tidy::ClangTidyContext Context( + std::make_unique(ClangTidyGlobalOptions(), Opts), + AllowEnablingAnalyzerAlphaCheckers); + ClangTidyCheckFactories Factories; + for (const ClangTidyModuleRegistry::entry &Module : + ClangTidyModuleRegistry::entries()) { + Module.instantiate()->addCheckFactories(Factories); + } + + for (const auto &Factory : Factories) + Result.Names.insert(Factory.getKey()); + +#if CLANG_TIDY_ENABLE_STATIC_ANALYZER + SmallString<64> Buffer(AnalyzerCheckNamePrefix); + size_t DefSize = Buffer.size(); + for (const auto &AnalyzerCheck : AnalyzerOptions::getRegisteredCheckers( + AllowEnablingAnalyzerAlphaCheckers)) { + Buffer.truncate(DefSize); + Buffer.append(AnalyzerCheck); + Result.Names.insert(Buffer); + } +#endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER + + Context.setOptionsCollector(&Result.Options); + for (const auto &Factory : Factories) { + Factory.getValue()(Factory.getKey(), &Context); + } + + return Result; +} } // namespace tidy } // namespace clang diff --git a/clang-tools-extra/clang-tidy/ClangTidy.h b/clang-tools-extra/clang-tidy/ClangTidy.h index 507d1ce6e572d900dcd5d536959283d446a8fd25..51d9e226c7946557cfb19bdfff560d2fcbff3c67 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.h +++ b/clang-tools-extra/clang-tidy/ClangTidy.h @@ -11,6 +11,7 @@ #include "ClangTidyDiagnosticConsumer.h" #include "ClangTidyOptions.h" +#include "llvm/ADT/StringSet.h" #include #include @@ -57,6 +58,14 @@ private: std::vector getCheckNames(const ClangTidyOptions &Options, bool AllowEnablingAnalyzerAlphaCheckers); +struct NamesAndOptions { + llvm::StringSet<> Names; + llvm::StringSet<> Options; +}; + +NamesAndOptions +getAllChecksAndOptions(bool AllowEnablingAnalyzerAlphaCheckers = true); + /// Returns the effective check-specific options. 
/// /// The method configures ClangTidy with the specified \p Options and collects diff --git a/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp index dc38b4e190cc104b9e595c251531c126ee7629c5..83aef0a40c9a1e61286123b5e293e4050145dda6 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp @@ -53,6 +53,8 @@ ClangTidyCheck::OptionsView::OptionsView( llvm::Optional ClangTidyCheck::OptionsView::get(StringRef LocalName) const { + if (Context->getOptionsCollector()) + Context->getOptionsCollector()->insert((NamePrefix + LocalName).str()); const auto &Iter = CheckOptions.find((NamePrefix + LocalName).str()); if (Iter != CheckOptions.end()) return StringRef(Iter->getValue().Value); @@ -60,8 +62,13 @@ ClangTidyCheck::OptionsView::get(StringRef LocalName) const { } static ClangTidyOptions::OptionMap::const_iterator -findPriorityOption(const ClangTidyOptions::OptionMap &Options, StringRef NamePrefix, - StringRef LocalName) { +findPriorityOption(const ClangTidyOptions::OptionMap &Options, + StringRef NamePrefix, StringRef LocalName, + llvm::StringSet<> *Collector) { + if (Collector) { + Collector->insert((NamePrefix + LocalName).str()); + Collector->insert(LocalName); + } auto IterLocal = Options.find((NamePrefix + LocalName).str()); auto IterGlobal = Options.find(LocalName); if (IterLocal == Options.end()) @@ -75,7 +82,8 @@ findPriorityOption(const ClangTidyOptions::OptionMap &Options, StringRef NamePre llvm::Optional ClangTidyCheck::OptionsView::getLocalOrGlobal(StringRef LocalName) const { - auto Iter = findPriorityOption(CheckOptions, NamePrefix, LocalName); + auto Iter = findPriorityOption(CheckOptions, NamePrefix, LocalName, + Context->getOptionsCollector()); if (Iter != CheckOptions.end()) return StringRef(Iter->getValue().Value); return None; @@ -108,7 +116,8 @@ ClangTidyCheck::OptionsView::get(StringRef LocalName) const { template <> llvm::Optional ClangTidyCheck::OptionsView::getLocalOrGlobal(StringRef LocalName) const { - auto Iter = findPriorityOption(CheckOptions, NamePrefix, LocalName); + auto Iter = findPriorityOption(CheckOptions, NamePrefix, LocalName, + Context->getOptionsCollector()); if (Iter != CheckOptions.end()) { if (auto Result = getAsBool(Iter->getValue().Value, Iter->getKey())) return Result; @@ -139,8 +148,11 @@ void ClangTidyCheck::OptionsView::store( llvm::Optional ClangTidyCheck::OptionsView::getEnumInt( StringRef LocalName, ArrayRef Mapping, bool CheckGlobal, bool IgnoreCase) const { + if (!CheckGlobal && Context->getOptionsCollector()) + Context->getOptionsCollector()->insert((NamePrefix + LocalName).str()); auto Iter = CheckGlobal - ? findPriorityOption(CheckOptions, NamePrefix, LocalName) + ? 
findPriorityOption(CheckOptions, NamePrefix, LocalName, + Context->getOptionsCollector()) : CheckOptions.find((NamePrefix + LocalName).str()); if (Iter == CheckOptions.end()) return None; diff --git a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h index d9424234fcf8df8bb9e4ee9d4dade556a3ed2406..261e4f7ac8626a55d96cf4eafb492facd2bc4612 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h +++ b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h @@ -15,6 +15,7 @@ #include "clang/Basic/Diagnostic.h" #include "clang/Tooling/Core/Diagnostic.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StringSet.h" #include "llvm/Support/Regex.h" namespace clang { @@ -201,6 +202,11 @@ public: DiagEngine->getDiagnosticIDs()->getDescription(DiagnosticID))); } + void setOptionsCollector(llvm::StringSet<> *Collector) { + OptionsCollector = Collector; + } + llvm::StringSet<> *getOptionsCollector() const { return OptionsCollector; } + private: // Writes to Stats. friend class ClangTidyDiagnosticConsumer; @@ -230,6 +236,7 @@ private: bool SelfContainedDiags; NoLintDirectiveHandler NoLintHandler; + llvm::StringSet<> *OptionsCollector = nullptr; }; /// Gets the Fix attached to \p Diagnostic. diff --git a/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp b/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp index a12a4d6692c77204a584a82998c43c08801e01c8..f07a8f9e893d8416eb8dae7c61aceeb080ad7e67 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp @@ -81,10 +81,44 @@ struct NOptionMap { std::vector Options; }; +template <> +void yamlize(IO &IO, ClangTidyOptions::OptionMap &Options, bool, + EmptyContext &Ctx) { + if (IO.outputting()) { + IO.beginMapping(); + // Only output as a map + for (auto &Key : Options) { + bool UseDefault; + void *SaveInfo; + IO.preflightKey(Key.getKey().data(), true, false, UseDefault, SaveInfo); + StringRef S = Key.getValue().Value; + IO.scalarString(S, needsQuotes(S)); + IO.postflightKey(SaveInfo); + } + IO.endMapping(); + } else { + // We need custom logic here to support the old method of specifying check + // options using a list of maps containing key and value keys. 
+    Input &I = reinterpret_cast<Input &>(IO);
+    if (isa<SequenceNode>(I.getCurrentNode())) {
+      MappingNormalization<NOptionMap, ClangTidyOptions::OptionMap> NOpts(
+          IO, Options);
+      EmptyContext Ctx;
+      yamlize(IO, NOpts->Options, true, Ctx);
+    } else if (isa<MappingNode>(I.getCurrentNode())) {
+      IO.beginMapping();
+      for (StringRef Key : IO.keys()) {
+        IO.mapRequired(Key.data(), Options[Key].Value);
+      }
+      IO.endMapping();
+    } else {
+      IO.setError("expected a sequence or map");
+    }
+  }
+}
+
 template <> struct MappingTraits<ClangTidyOptions> {
   static void mapping(IO &IO, ClangTidyOptions &Options) {
-    MappingNormalization<NOptionMap, ClangTidyOptions::OptionMap> NOpts(
-        IO, Options.CheckOptions);
     bool Ignored = false;
     IO.mapOptional("Checks", Options.Checks);
     IO.mapOptional("WarningsAsErrors", Options.WarningsAsErrors);
@@ -92,7 +126,7 @@ template <> struct MappingTraits<ClangTidyOptions> {
     IO.mapOptional("AnalyzeTemporaryDtors", Ignored); // legacy compatibility
     IO.mapOptional("FormatStyle", Options.FormatStyle);
     IO.mapOptional("User", Options.User);
-    IO.mapOptional("CheckOptions", NOpts->Options);
+    IO.mapOptional("CheckOptions", Options.CheckOptions);
     IO.mapOptional("ExtraArgs", Options.ExtraArgs);
     IO.mapOptional("ExtraArgsBefore", Options.ExtraArgsBefore);
     IO.mapOptional("InheritParentConfig", Options.InheritParentConfig);
diff --git a/clang-tools-extra/clang-tidy/bugprone/NotNullTerminatedResultCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/NotNullTerminatedResultCheck.cpp
index 1efd88d7cbb955206a9a50625eee7334eba597f2..b2579e9ea615fb578a675975ae15d27e5b6cdba1 100644
--- a/clang-tools-extra/clang-tidy/bugprone/NotNullTerminatedResultCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/NotNullTerminatedResultCheck.cpp
@@ -817,7 +817,7 @@ void NotNullTerminatedResultCheck::check(
     ++It;
   }

-  if (AreSafeFunctionsWanted.hasValue())
+  if (AreSafeFunctionsWanted)
     UseSafeFunctions = AreSafeFunctionsWanted.getValue();
 }
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp
index e730fb6fa01dbb1e8d4eba1d01b08c0ea73631fe..92b38bcfd81d179fccade14e50365b6c5377926c 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp
@@ -54,13 +54,17 @@ getVirtualKeywordRange(const CXXDestructorDecl &Destructor,
     return None;

   SourceLocation VirtualBeginLoc = Destructor.getBeginLoc();
-  SourceLocation VirtualEndLoc = VirtualBeginLoc.getLocWithOffset(
-      Lexer::MeasureTokenLength(VirtualBeginLoc, SM, LangOpts));
+  SourceLocation VirtualBeginSpellingLoc =
+      SM.getSpellingLoc(Destructor.getBeginLoc());
+  SourceLocation VirtualEndLoc = VirtualBeginSpellingLoc.getLocWithOffset(
+      Lexer::MeasureTokenLength(VirtualBeginSpellingLoc, SM, LangOpts));

   /// Range ends with \c StartOfNextToken so that any whitespace after \c
   /// virtual is included.
-  SourceLocation StartOfNextToken =
-      Lexer::findNextToken(VirtualEndLoc, SM, LangOpts)->getLocation();
+  Optional<Token> NextToken = Lexer::findNextToken(VirtualEndLoc, SM, LangOpts);
+  if (!NextToken)
+    return None;
+  SourceLocation StartOfNextToken = NextToken->getLocation();

   return CharSourceRange::getCharRange(VirtualBeginLoc, StartOfNextToken);
 }
diff --git a/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp b/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp
index 1b0010bdd62a2076b0a2b954a4d7c10fc4f284d3..b5e5191876cadd6c35d766c05125117818da3c9a 100644
--- a/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp
+++ b/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp
@@ -19,6 +19,7 @@
 #include "../ClangTidyForceLinker.h"
 #include "../GlobList.h"
 #include "clang/Tooling/CommonOptionsParser.h"
+#include "llvm/ADT/StringSet.h"
 #include "llvm/Support/InitLLVM.h"
 #include "llvm/Support/PluginLoader.h"
 #include "llvm/Support/Process.h"
@@ -51,8 +52,7 @@ Configuration files:
     InheritParentConfig: true
     User: user
     CheckOptions:
-      - key: some-check.SomeOption
-        value: 'some value'
+      some-check.SomeOption: 'some value'
     ...

)");
@@ -170,8 +170,7 @@ line or a specific configuration file.
 static cl::opt<std::string> Config("config", cl::desc(R"(
 Specifies a configuration in YAML/JSON format:
   -config="{Checks: '*',
-            CheckOptions: [{key: x,
-                            value: y}]}"
+            CheckOptions: {x: y}}"
 When the value is empty, clang-tidy will
 attempt to find a file named .clang-tidy for
 each source file in its parent directories.
@@ -257,6 +256,12 @@ This option overrides the 'UseColor' option in
 )"),
                              cl::init(false), cl::cat(ClangTidyCategory));

+static cl::opt<bool> VerifyConfig("verify-config", cl::desc(R"(
+Check the config files to ensure each check and
+option is recognized.
+)"),
+                                  cl::init(false), cl::cat(ClangTidyCategory));
+
 namespace clang {
 namespace tidy {

@@ -385,6 +390,74 @@ getVfsFromFile(const std::string &OverlayFile,
   return FS;
 }

+static StringRef closest(StringRef Value, const StringSet<> &Allowed) {
+  unsigned MaxEdit = 5U;
+  StringRef Closest;
+  for (auto Item : Allowed.keys()) {
+    unsigned Cur = Value.edit_distance_insensitive(Item, true, MaxEdit);
+    if (Cur < MaxEdit) {
+      Closest = Item;
+      MaxEdit = Cur;
+    }
+  }
+  return Closest;
+}
+
+static constexpr StringLiteral VerifyConfigWarningEnd = " [-verify-config]\n";
+
+static bool verifyChecks(const StringSet<> &AllChecks, StringRef CheckGlob,
+                         StringRef Source) {
+  llvm::StringRef Cur, Rest;
+  bool AnyInvalid = false;
+  for (std::tie(Cur, Rest) = CheckGlob.split(',');
+       !(Cur.empty() && Rest.empty()); std::tie(Cur, Rest) = Rest.split(',')) {
+    Cur = Cur.trim();
+    if (Cur.empty())
+      continue;
+    Cur.consume_front("-");
+    if (Cur.startswith("clang-diagnostic"))
+      continue;
+    if (Cur.contains('*')) {
+      SmallString<128> RegexText("^");
+      StringRef MetaChars("()^$|*+?.[]\\{}");
+      for (char C : Cur) {
+        if (C == '*')
+          RegexText.push_back('.');
+        else if (MetaChars.contains(C))
+          RegexText.push_back('\\');
+        RegexText.push_back(C);
+      }
+      RegexText.push_back('$');
+      llvm::Regex Glob(RegexText);
+      std::string Error;
+      if (!Glob.isValid(Error)) {
+        AnyInvalid = true;
+        llvm::WithColor::error(llvm::errs(), Source)
+            << "building check glob '" << Cur << "' " << Error << "'\n";
+        continue;
+      }
+      if (llvm::none_of(AllChecks.keys(),
+                        [&Glob](StringRef S) { return Glob.match(S); })) {
+        AnyInvalid = true;
+        llvm::WithColor::warning(llvm::errs(), Source)
+            << "check glob '" << Cur << "' doesn't match any known check"
+            << VerifyConfigWarningEnd;
+      }
+    } else {
+      if
(AllChecks.contains(Cur)) + continue; + AnyInvalid = true; + llvm::raw_ostream &Output = llvm::WithColor::warning(llvm::errs(), Source) + << "unknown check '" << Cur << '\''; + llvm::StringRef Closest = closest(Cur, AllChecks); + if (!Closest.empty()) + Output << "; did you mean '" << Closest << '\''; + Output << VerifyConfigWarningEnd; + } + } + return AnyInvalid; +} + int clangTidyMain(int argc, const char **argv) { llvm::InitLLVM X(argc, argv); @@ -478,6 +551,38 @@ int clangTidyMain(int argc, const char **argv) { return 0; } + if (VerifyConfig) { + std::vector RawOptions = + OptionsProvider->getRawOptions(FileName); + NamesAndOptions Valid = + getAllChecksAndOptions(AllowEnablingAnalyzerAlphaCheckers); + bool AnyInvalid = false; + for (const std::pair &OptionWithSource : + RawOptions) { + const ClangTidyOptions &Opts = OptionWithSource.first; + if (Opts.Checks) + AnyInvalid |= + verifyChecks(Valid.Names, *Opts.Checks, OptionWithSource.second); + + for (auto Key : Opts.CheckOptions.keys()) { + if (Valid.Options.contains(Key)) + continue; + AnyInvalid = true; + auto &Output = + llvm::WithColor::warning(llvm::errs(), OptionWithSource.second) + << "unknown check option '" << Key << '\''; + llvm::StringRef Closest = closest(Key, Valid.Options); + if (!Closest.empty()) + Output << "; did you mean '" << Closest << '\''; + Output << VerifyConfigWarningEnd; + } + } + if (AnyInvalid) + return 1; + llvm::outs() << "No config errors detected.\n"; + return 0; + } + if (EnabledChecks.empty()) { llvm::errs() << "Error: no checks enabled.\n"; llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); diff --git a/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py b/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py index 821b941d4c3837506753d208158e170ff4c77899..e3da6fb9b09679fbfcf0689386bd4139e2847e19 100755 --- a/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py +++ b/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py @@ -236,8 +236,7 @@ def main(): config_group.add_argument('-config', default=None, help='Specifies a configuration in YAML/JSON format: ' ' -config="{Checks: \'*\', ' - ' CheckOptions: [{key: x, ' - ' value: y}]}" ' + ' CheckOptions: {x: y}}" ' 'When the value is empty, clang-tidy will ' 'attempt to find a file named .clang-tidy for ' 'each source file in its parent directories.') diff --git a/clang-tools-extra/clangd/ClangdLSPServer.cpp b/clang-tools-extra/clangd/ClangdLSPServer.cpp index 3bade14f86b9104b32ef9e1a63504d7bfaa319d3..edafb40ff2b79557c0163979bba90d37c3df2266 100644 --- a/clang-tools-extra/clangd/ClangdLSPServer.cpp +++ b/clang-tools-extra/clangd/ClangdLSPServer.cpp @@ -663,7 +663,7 @@ void ClangdLSPServer::onDocumentDidOpen( void ClangdLSPServer::onDocumentDidChange( const DidChangeTextDocumentParams &Params) { auto WantDiags = WantDiagnostics::Auto; - if (Params.wantDiagnostics.hasValue()) + if (Params.wantDiagnostics) WantDiags = Params.wantDiagnostics.getValue() ? 
WantDiagnostics::Yes : WantDiagnostics::No; diff --git a/clang-tools-extra/clangd/ClangdServer.cpp b/clang-tools-extra/clangd/ClangdServer.cpp index fa6c70b4acbc4346b2b70b458d165ceacffcbb70..4d9db8888c773508fb71e7bb6344d60b87692f74 100644 --- a/clang-tools-extra/clangd/ClangdServer.cpp +++ b/clang-tools-extra/clangd/ClangdServer.cpp @@ -411,7 +411,7 @@ void ClangdServer::codeComplete(PathRef File, Position Pos, clang::clangd::trace::Span Tracer("Completion results callback"); CB(std::move(Result)); } - if (SpecFuzzyFind && SpecFuzzyFind->NewReq.hasValue()) { + if (SpecFuzzyFind && SpecFuzzyFind->NewReq) { std::lock_guard Lock(CachedCompletionFuzzyFindRequestMutex); CachedCompletionFuzzyFindRequestByFile[File] = SpecFuzzyFind->NewReq.getValue(); diff --git a/clang-tools-extra/clangd/FeatureModule.cpp b/clang-tools-extra/clangd/FeatureModule.cpp index 85977aadd6e3207eb5dfcfd2630c50b78c3721e8..872cea14437894679c5cf9887df9e26a3661252b 100644 --- a/clang-tools-extra/clangd/FeatureModule.cpp +++ b/clang-tools-extra/clangd/FeatureModule.cpp @@ -13,12 +13,12 @@ namespace clang { namespace clangd { void FeatureModule::initialize(const Facilities &F) { - assert(!Fac.hasValue() && "Initialized twice"); + assert(!Fac && "Initialized twice"); Fac.emplace(F); } FeatureModule::Facilities &FeatureModule::facilities() { - assert(Fac.hasValue() && "Not initialized yet"); + assert(Fac && "Not initialized yet"); return *Fac; } diff --git a/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp b/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp index 722a5fb699f61548d99a55d172cf468d89a93d43..c3ad1b8cfb66620e2baae3d96d2519ede922ed9f 100644 --- a/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp +++ b/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp @@ -46,7 +46,7 @@ protected: } LSPClient &start() { - EXPECT_FALSE(Server.hasValue()) << "Already initialized"; + EXPECT_FALSE(Server) << "Already initialized"; Server.emplace(Client.transport(), FS, Opts); ServerThread.emplace([&] { EXPECT_TRUE(Server->run()); }); Client.call("initialize", llvm::json::Object{}); diff --git a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp index 88698d317716835562e0da0b3b93a84cccf5789d..f962c3f4ff336237b991dde32fc62838898bde3c 100644 --- a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp +++ b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp @@ -92,7 +92,7 @@ Matcher &> has(std::string Name, CompletionItemKind K) { return Contains(AllOf(named(std::move(Name)), kind(K))); } -MATCHER(isDocumented, "") { return arg.Documentation.hasValue(); } +MATCHER(isDocumented, "") { return arg.Documentation.has_value(); } MATCHER(deprecated, "") { return arg.Deprecated; } std::unique_ptr memIndex(std::vector Symbols) { diff --git a/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp b/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp index 45ceee01ea9a1cbe5b015dd3a49d728b68d9e28c..0a24f3c0c69b2fbc676367ea5a1a3ce6d6b07064 100644 --- a/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp +++ b/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp @@ -892,8 +892,7 @@ void bar(int *Y); ASSERT_TRUE(X->getOriginalType()->getNullability(X->getASTContext()) == NullabilityKind::NonNull); const auto *Y = cast(findDecl(AST, "bar")).getParamDecl(0); - ASSERT_FALSE( - Y->getOriginalType()->getNullability(X->getASTContext()).hasValue()); + ASSERT_FALSE(Y->getOriginalType()->getNullability(X->getASTContext())); } 
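This and the surrounding test hunks are part of the llvm::Optional transition toward the std::optional interface: `hasValue()` becomes `has_value()`, `getValue()` becomes `value()` or `operator*`, and checks in boolean position rely on the explicit operator bool. The spellings are interchangeable, which is why these hunks are purely mechanical; a compressed illustration using std::optional, whose API llvm::Optional mirrors:

```c++
#include <cassert>
#include <optional>

int main() {
  std::optional<int> O = 42;
  assert(O.has_value());        // the new-style hasValue() replacement
  assert(static_cast<bool>(O)); // what "if (O)" and EXPECT_TRUE(O) rely on
  assert(*O == O.value());      // getValue() becomes value() or operator*
}
```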
TEST(DiagnosticsTest, InsideMacros) { diff --git a/clang-tools-extra/clangd/unittests/DumpASTTests.cpp b/clang-tools-extra/clangd/unittests/DumpASTTests.cpp index f316eedfbf60561b8d824c98080951650684be71..e7b368fd2552234b5ee512d86d5aa6a0e5ad0e6b 100644 --- a/clang-tools-extra/clangd/unittests/DumpASTTests.cpp +++ b/clang-tools-extra/clangd/unittests/DumpASTTests.cpp @@ -172,8 +172,7 @@ TEST(DumpASTTests, NoRange) { ASSERT_THAT(Node.children, Contains(withDetail("varFromSource"))); ASSERT_THAT(Node.children, Not(Contains(withDetail("funcFromHeader")))); EXPECT_THAT(Node.arcana, testing::StartsWith("TranslationUnitDecl ")); - ASSERT_FALSE(Node.range.hasValue()) - << "Expected no range for translation unit"; + ASSERT_FALSE(Node.range) << "Expected no range for translation unit"; } TEST(DumpASTTests, Arcana) { diff --git a/clang-tools-extra/clangd/unittests/FileIndexTests.cpp b/clang-tools-extra/clangd/unittests/FileIndexTests.cpp index 912d7d0e9613999e1f790793cd4c0308cab7494a..4aee602c4cfcf08f35a80d4354e4c74425bce83c 100644 --- a/clang-tools-extra/clangd/unittests/FileIndexTests.cpp +++ b/clang-tools-extra/clangd/unittests/FileIndexTests.cpp @@ -650,7 +650,7 @@ TEST(FileShardedIndexTest, Sharding) { Relation{Sym3.ID, RelationKind::BaseOf, Sym1.ID})); ASSERT_THAT(Shard->Sources->keys(), UnorderedElementsAre(AHeaderUri)); EXPECT_THAT(Shard->Sources->lookup(AHeaderUri).DirectIncludes, IsEmpty()); - EXPECT_TRUE(Shard->Cmd.hasValue()); + EXPECT_TRUE(Shard->Cmd); } { auto Shard = ShardedIndex.getShard(BHeaderUri); @@ -665,7 +665,7 @@ TEST(FileShardedIndexTest, Sharding) { UnorderedElementsAre(BHeaderUri, AHeaderUri)); EXPECT_THAT(Shard->Sources->lookup(BHeaderUri).DirectIncludes, UnorderedElementsAre(AHeaderUri)); - EXPECT_TRUE(Shard->Cmd.hasValue()); + EXPECT_TRUE(Shard->Cmd); } { auto Shard = ShardedIndex.getShard(BSourceUri); @@ -677,7 +677,7 @@ TEST(FileShardedIndexTest, Sharding) { UnorderedElementsAre(BSourceUri, BHeaderUri)); EXPECT_THAT(Shard->Sources->lookup(BSourceUri).DirectIncludes, UnorderedElementsAre(BHeaderUri)); - EXPECT_TRUE(Shard->Cmd.hasValue()); + EXPECT_TRUE(Shard->Cmd); } } diff --git a/clang-tools-extra/clangd/unittests/GlobalCompilationDatabaseTests.cpp b/clang-tools-extra/clangd/unittests/GlobalCompilationDatabaseTests.cpp index 3fc45edd16dec6019dd05ebc70751992d47137cc..f7a3f2ae16ad3cfdd60b2b8c9a8a26a22f0957f9 100644 --- a/clang-tools-extra/clangd/unittests/GlobalCompilationDatabaseTests.cpp +++ b/clang-tools-extra/clangd/unittests/GlobalCompilationDatabaseTests.cpp @@ -145,7 +145,7 @@ TEST_F(OverlayCDBTest, Adjustments) { return Ret; }); // Command from underlying gets adjusted. - auto Cmd = CDB.getCompileCommand(testPath("foo.cc")).getValue(); + auto Cmd = *CDB.getCompileCommand(testPath("foo.cc")); EXPECT_THAT(Cmd.CommandLine, ElementsAre("clang", "-DA=1", testPath("foo.cc"), "-DAdjust_foo.cc")); @@ -154,7 +154,7 @@ TEST_F(OverlayCDBTest, Adjustments) { BarCommand.Filename = testPath("bar.cc"); BarCommand.CommandLine = {"clang++", "-DB=1", testPath("bar.cc")}; CDB.setCompileCommand(testPath("bar.cc"), BarCommand); - Cmd = CDB.getCompileCommand(testPath("bar.cc")).getValue(); + Cmd = *CDB.getCompileCommand(testPath("bar.cc")); EXPECT_THAT( Cmd.CommandLine, ElementsAre("clang++", "-DB=1", testPath("bar.cc"), "-DAdjust_bar.cc")); @@ -253,7 +253,7 @@ TEST(GlobalCompilationDatabaseTest, DiscoveryWithNestedCDBs) { // Does not use the root CDB, so no broadcast. 
auto Cmd = DB.getCompileCommand(testPath("build/../a.cc")); - ASSERT_TRUE(Cmd.hasValue()); + ASSERT_TRUE(Cmd); EXPECT_THAT(Cmd->CommandLine, Contains("-DFOO")) << "a.cc uses foo/ CDB"; ASSERT_TRUE(DB.blockUntilIdle(timeoutSeconds(10))); EXPECT_THAT(DiscoveredFiles, IsEmpty()) << "Root CDB not discovered yet"; @@ -338,7 +338,7 @@ TEST(GlobalCompilationDatabaseTest, CompileFlagsDirectory) { } MATCHER_P(hasArg, Flag, "") { - if (!arg.hasValue()) { + if (!arg) { *result_listener << "command is null"; return false; } diff --git a/clang-tools-extra/clangd/unittests/HeadersTests.cpp b/clang-tools-extra/clangd/unittests/HeadersTests.cpp index 69fcadcf0674a67824f44f0fd6a2999c3cc8f50e..32e4aea15490bdae4f89f8488037699e54807e02 100644 --- a/clang-tools-extra/clangd/unittests/HeadersTests.cpp +++ b/clang-tools-extra/clangd/unittests/HeadersTests.cpp @@ -329,7 +329,7 @@ TEST_F(HeadersTest, DontInsertDuplicateResolved) { TEST_F(HeadersTest, PreferInserted) { auto Edit = insert(""); - EXPECT_TRUE(Edit.hasValue()); + EXPECT_TRUE(Edit); EXPECT_TRUE(StringRef(Edit->newText).contains("")); } diff --git a/clang-tools-extra/clangd/unittests/LSPClient.cpp b/clang-tools-extra/clangd/unittests/LSPClient.cpp index 21f64c72619108df98437b3284faa62da1a2847c..aaadc62e68f358b2aac8af0bf3634c8e9754efb2 100644 --- a/clang-tools-extra/clangd/unittests/LSPClient.cpp +++ b/clang-tools-extra/clangd/unittests/LSPClient.cpp @@ -24,7 +24,7 @@ namespace clangd { llvm::Expected clang::clangd::LSPClient::CallResult::take() { std::unique_lock Lock(Mu); if (!clangd::wait(Lock, CV, timeoutSeconds(10), - [this] { return Value.hasValue(); })) { + [this] { return Value.has_value(); })) { ADD_FAILURE() << "No result from call after 10 seconds!"; return llvm::json::Value(nullptr); } diff --git a/clang-tools-extra/clangd/unittests/Matchers.h b/clang-tools-extra/clangd/unittests/Matchers.h index ef53e867949c9bf66dfc2531c750e704cc0515e4..0fbd93b2e68825de172c455a6ae32ece3bab24be 100644 --- a/clang-tools-extra/clangd/unittests/Matchers.h +++ b/clang-tools-extra/clangd/unittests/Matchers.h @@ -173,7 +173,7 @@ private: virtual bool MatchAndExplain(Optional optional, ::testing::MatchResultListener *listener) const { - if (!optional.hasValue()) + if (!optional) return false; *listener << "which has a value "; diff --git a/clang-tools-extra/clangd/unittests/SerializationTests.cpp b/clang-tools-extra/clangd/unittests/SerializationTests.cpp index efbaed233c5b6e8b2e5683f9abc374f72c49094f..70873efe5776cadd5434a1946f4afabb4382eb09 100644 --- a/clang-tools-extra/clangd/unittests/SerializationTests.cpp +++ b/clang-tools-extra/clangd/unittests/SerializationTests.cpp @@ -297,7 +297,7 @@ TEST(SerializationTest, CmdlTest) { ASSERT_TRUE(bool(In)) << In.takeError(); ASSERT_TRUE(In->Cmd); - const tooling::CompileCommand &SerializedCmd = In->Cmd.getValue(); + const tooling::CompileCommand &SerializedCmd = *In->Cmd; EXPECT_EQ(SerializedCmd.CommandLine, Cmd.CommandLine); EXPECT_EQ(SerializedCmd.Directory, Cmd.Directory); EXPECT_NE(SerializedCmd.Filename, Cmd.Filename); diff --git a/clang-tools-extra/clangd/unittests/SymbolCollectorTests.cpp b/clang-tools-extra/clangd/unittests/SymbolCollectorTests.cpp index 32c889876f24c80363355d2d4b846785af2939ef..8dc7877c17849f318310157d407596fb9598d5cc 100644 --- a/clang-tools-extra/clangd/unittests/SymbolCollectorTests.cpp +++ b/clang-tools-extra/clangd/unittests/SymbolCollectorTests.cpp @@ -119,7 +119,7 @@ public: // build() must have been called. 
bool shouldCollect(llvm::StringRef Name, bool Qualified = true) { - assert(AST.hasValue()); + assert(AST); const NamedDecl &ND = Qualified ? findDecl(*AST, Name) : findUnqualifiedDecl(*AST, Name); const SourceManager &SM = AST->getSourceManager(); diff --git a/clang-tools-extra/clangd/unittests/SyncAPI.cpp b/clang-tools-extra/clangd/unittests/SyncAPI.cpp index 4e76cdf6c540d573f89c06c016d0f865bd52ba9f..52f47b01b4e1b4b3f2fc1cfa071b05c2e8cf83f9 100644 --- a/clang-tools-extra/clangd/unittests/SyncAPI.cpp +++ b/clang-tools-extra/clangd/unittests/SyncAPI.cpp @@ -27,9 +27,7 @@ namespace { /// T Result; /// someAsyncFunc(Param1, Param2, /*Callback=*/capture(Result)); template struct CaptureProxy { - CaptureProxy(llvm::Optional &Target) : Target(&Target) { - assert(!Target.hasValue()); - } + CaptureProxy(llvm::Optional &Target) : Target(&Target) { assert(!Target); } CaptureProxy(const CaptureProxy &) = delete; CaptureProxy &operator=(const CaptureProxy &) = delete; @@ -51,7 +49,7 @@ template struct CaptureProxy { if (!Target) return; assert(Future.valid() && "conversion to callback was not called"); - assert(!Target->hasValue()); + assert(!Target->has_value()); Target->emplace(std::move(*Future.get())); } diff --git a/clang-tools-extra/clangd/unittests/TestTU.cpp b/clang-tools-extra/clangd/unittests/TestTU.cpp index c47ed2910baa5be8bd9749eec42e30d14d43756c..3da1086829605885529f7c43e5958ece0221d32d 100644 --- a/clang-tools-extra/clangd/unittests/TestTU.cpp +++ b/clang-tools-extra/clangd/unittests/TestTU.cpp @@ -123,7 +123,7 @@ ParsedAST TestTU::build() const { /*PreambleCallback=*/nullptr); auto AST = ParsedAST::build(testPath(Filename), Inputs, std::move(CI), Diags.take(), Preamble); - if (!AST.hasValue()) { + if (!AST) { llvm::errs() << "Failed to build code:\n" << Code; std::abort(); } diff --git a/clang-tools-extra/clangd/unittests/TidyProviderTests.cpp b/clang-tools-extra/clangd/unittests/TidyProviderTests.cpp index a16c87456a1a0e0906c219809e463771354e0852..df3dcac0aa51a99448ea4e68490e513fbb4ad4f7 100644 --- a/clang-tools-extra/clangd/unittests/TidyProviderTests.cpp +++ b/clang-tools-extra/clangd/unittests/TidyProviderTests.cpp @@ -20,20 +20,17 @@ TEST(TidyProvider, NestedDirectories) { FS.Files[testPath(".clang-tidy")] = R"yaml( Checks: 'llvm-*' CheckOptions: - - key: TestKey - value: 1 + TestKey: 1 )yaml"; FS.Files[testPath("sub1/.clang-tidy")] = R"yaml( Checks: 'misc-*' CheckOptions: - - key: TestKey - value: 2 + TestKey: 2 )yaml"; FS.Files[testPath("sub1/sub2/.clang-tidy")] = R"yaml( Checks: 'bugprone-*' CheckOptions: - - key: TestKey - value: 3 + TestKey: 3 InheritParentConfig: true )yaml"; diff --git a/clang-tools-extra/clangd/unittests/tweaks/TweakTesting.cpp b/clang-tools-extra/clangd/unittests/tweaks/TweakTesting.cpp index 3f472f26c0aa429eb94517e9cb61101f54afa9b4..726e2b87d14002d1144710de5451b33c5f25e296 100644 --- a/clang-tools-extra/clangd/unittests/tweaks/TweakTesting.cpp +++ b/clang-tools-extra/clangd/unittests/tweaks/TweakTesting.cpp @@ -136,7 +136,7 @@ bool TweakTest::isAvailable(WrappedAST &AST, // We only care if prepare() succeeded, but must handle Errors. 
if (Result && !*Result) consumeError(Result->takeError()); - return Result.hasValue(); + return Result.has_value(); } TweakTest::WrappedAST TweakTest::build(llvm::StringRef Code) const { diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index b1a5d110a9217cceb5c8d39891f62dbb913d62da..f148884e90c5daaad3f73d763cbe42a8460b64e0 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -110,6 +110,11 @@ Improvements to clang-tidy from suppressing diagnostics associated with macro arguments. This fixes `Issue 55134 `_. +- Added an option -verify-config which will check the config file to ensure each + `Checks` and `CheckOptions` entries are recognised. + +- .clang-tidy files can now use the more natural dictionary syntax for specifying `CheckOptions`. + New checks ^^^^^^^^^^ @@ -160,6 +165,15 @@ Changes in existing checks ` when `sizeof(...)` is compared against a `__int128_t`. +- Fixed bugs in :doc:`bugprone-use-after-move + `: + + - Treat a move in a lambda capture as happening in the function that defines + the lambda, not within the body of the lambda (as we were previously doing + erroneously). + + - Don't emit an erroneous warning on self-moves. + - Made :doc:`cert-oop57-cpp ` more sensitive by checking for an arbitrary expression in the second argument of ``memset``. @@ -205,6 +219,10 @@ Changes in existing checks ` to work when the vector is a member of a structure. +- Fixed a crash in :doc:`performance-unnecessary-value-param + ` when the specialization + template has an unnecessary value parameter. Removed the fix for a template. + - Fixed a crash in :doc:`readability-const-return-type ` when a pure virtual function overrided has a const return type. Removed the fix for a virtual function. @@ -220,19 +238,6 @@ Changes in existing checks ` to simplify expressions using DeMorgan's Theorem. -- Fixed a crash in :doc:`performance-unnecessary-value-param - ` when the specialization - template has an unnecessary value parameter. Removed the fix for a template. - -- Fixed bugs in :doc:`bugprone-use-after-move - `: - - - Treat a move in a lambda capture as happening in the function that defines - the lambda, not within the body of the lambda (as we were previously doing - erroneously). - - - Don't emit an erroneous warning on self-moves. - Removed checks ^^^^^^^^^^^^^^ diff --git a/clang-tools-extra/docs/clang-tidy/Contributing.rst b/clang-tools-extra/docs/clang-tidy/Contributing.rst index e67de656f90da636d2a60e7523b01156c9bf638a..0014bb23aee4ff0217fd59ac6ce839d914d067fc 100644 --- a/clang-tools-extra/docs/clang-tidy/Contributing.rst +++ b/clang-tools-extra/docs/clang-tidy/Contributing.rst @@ -519,17 +519,15 @@ be set in a ``.clang-tidy`` file in the following way: .. code-block:: yaml CheckOptions: - - key: my-check.SomeOption1 - value: 123 - - key: my-check.SomeOption2 - value: 'some other value' + my-check.SomeOption1: 123 + my-check.SomeOption2: 'some other value' If you need to specify check options on a command line, you can use the inline YAML format: .. code-block:: console - $ clang-tidy -config="{CheckOptions: [{key: a, value: b}, {key: x, value: y}]}" ... + $ clang-tidy -config="{CheckOptions: {a: b, x: y}}" ... 
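The ReleaseNotes and Contributing.rst updates above document the new map-style `CheckOptions`; the `yamlize` specialization added to ClangTidyOptions.cpp earlier in this patch is what keeps the legacy list-of-maps spelling parsing. A quick self-contained check that both spellings produce the same options, assuming a tree where `clang::tidy::parseConfiguration` takes an `llvm::MemoryBufferRef` (as here; include paths depend on how you build against clang-tools-extra):

```c++
#include "ClangTidyOptions.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cassert>

int main() {
  llvm::StringRef Old = "CheckOptions:\n"
                        "  - key: my-check.SomeOption\n"
                        "    value: '1'\n";
  llvm::StringRef New = "CheckOptions:\n"
                        "  my-check.SomeOption: '1'\n";
  auto A = clang::tidy::parseConfiguration(llvm::MemoryBufferRef(Old, "old"));
  auto B = clang::tidy::parseConfiguration(llvm::MemoryBufferRef(New, "new"));
  assert(A && B);
  // Both forms land in the same OptionMap with the same value.
  assert(A->CheckOptions.lookup("my-check.SomeOption").Value ==
         B->CheckOptions.lookup("my-check.SomeOption").Value);
}
```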
Testing Checks diff --git a/clang-tools-extra/docs/clang-tidy/checks/misc/confusable-identifiers.rst b/clang-tools-extra/docs/clang-tidy/checks/misc/confusable-identifiers.rst index 5a89212ae9ab0a8e946743396b3d268285717ae0..b6d3e9981acf92621074710f14e69aaeb29bb617 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/misc/confusable-identifiers.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/misc/confusable-identifiers.rst @@ -9,7 +9,7 @@ attack described in `CVE-2021-42574 - Specifies a configuration in YAML/JSON format: -config="{Checks: '*', - CheckOptions: [{key: x, - value: y}]}" + CheckOptions: {x, y}}" When the value is empty, clang-tidy will attempt to find a file named .clang-tidy for each source file in its parent directories. @@ -244,6 +243,9 @@ An overview of all the command-line options: standard output supports colors. This option overrides the 'UseColor' option in .clang-tidy file, if any. + --verify-config - + Check the config files to ensure each check and + option is recognized. --vfsoverlay= - Overlay the virtual filesystem described by file over the real file system. @@ -292,8 +294,7 @@ An overview of all the command-line options: InheritParentConfig: true User: user CheckOptions: - - key: some-check.SomeOption - value: 'some value' + some-check.SomeOption: 'some value' ... .. _clang-tidy-nolint: diff --git a/clang-tools-extra/pseudo/include/clang-pseudo/GLR.h b/clang-tools-extra/pseudo/include/clang-pseudo/GLR.h index 8783872fa35567d3e1853eea8c3e23ab22586e16..a3e8611de4252005669dd2ee43ceb82580d7c19f 100644 --- a/clang-tools-extra/pseudo/include/clang-pseudo/GLR.h +++ b/clang-tools-extra/pseudo/include/clang-pseudo/GLR.h @@ -132,34 +132,17 @@ struct ParseParams { const ForestNode &glrParse(const TokenStream &Code, const ParseParams &Params, SymbolID StartSymbol); -// An active stack head can have multiple available actions (reduce/reduce -// actions, reduce/shift actions). -// A step is any one action applied to any one stack head. -struct ParseStep { - // A specific stack head. - const GSS::Node *Head = nullptr; - // An action associated with the head. - LRTable::Action Action = LRTable::Action::sentinel(); -}; -// A callback is invoked whenever a new GSS head is created during the GLR -// parsing process (glrShift, or glrReduce). -using NewHeadCallback = std::function; -// Apply all PendingShift actions on a given GSS state, newly-created heads are -// passed to the callback. -// -// When this function returns, PendingShift is empty. +// Shift a token onto all OldHeads, placing the results into NewHeads. // // Exposed for testing only. -void glrShift(std::vector &PendingShift, const ForestNode &NextTok, - const ParseParams &Params, NewHeadCallback NewHeadCB); -// Applies PendingReduce actions, until no more reduce actions are available. -// -// When this function returns, PendingReduce is empty. Calls to NewHeadCB may -// add elements to PendingReduce +void glrShift(llvm::ArrayRef OldHeads, + const ForestNode &NextTok, const ParseParams &Params, + std::vector &NewHeads); +// Applies available reductions on Heads, appending resulting heads to the list. // // Exposed for testing only. 
-void glrReduce(std::vector<ParseStep> &PendingReduce, const ParseParams &Params,
-               NewHeadCallback NewHeadCB);
+void glrReduce(std::vector<const GSS::Node *> &Heads, SymbolID Lookahead,
+               const ParseParams &Params);

 } // namespace pseudo
 } // namespace clang
diff --git a/clang-tools-extra/pseudo/include/clang-pseudo/grammar/LRTable.h b/clang-tools-extra/pseudo/include/clang-pseudo/grammar/LRTable.h
index 08e4868b88f70d7ed4cfb632d4a593c941e614be..ab619774d93da41402e56df9b9f7c25dbd664247 100644
--- a/clang-tools-extra/pseudo/include/clang-pseudo/grammar/LRTable.h
+++ b/clang-tools-extra/pseudo/include/clang-pseudo/grammar/LRTable.h
@@ -128,7 +128,12 @@ public:
   llvm::ArrayRef<Action> getActions(StateID State, SymbolID Terminal) const;
   // Returns the state after we reduce a nonterminal.
   // Expected to be called by LR parsers.
+  // REQUIRES: Nonterminal is valid here.
   StateID getGoToState(StateID State, SymbolID Nonterminal) const;
+  // Returns the state after we shift a terminal.
+  // Expected to be called by LR parsers.
+  // If the terminal is invalid here, returns None.
+  llvm::Optional<StateID> getShiftState(StateID State, SymbolID Terminal) const;

   // Looks up available actions.
   // Returns empty if no available actions in the table.
diff --git a/clang-tools-extra/pseudo/lib/GLR.cpp b/clang-tools-extra/pseudo/lib/GLR.cpp
index 39da7f15c5c9a7a16d72fa0d923a53329f7dd3a6..d93f682afac6cf1b8f289f3cbd49dec82a6211af 100644
--- a/clang-tools-extra/pseudo/lib/GLR.cpp
+++ b/clang-tools-extra/pseudo/lib/GLR.cpp
@@ -15,7 +15,6 @@
 #include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FormatVariadic.h"
 #include
 #include
@@ -37,94 +36,6 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const GSS::Node &N) {
   return OS;
 }

-const ForestNode &glrParse(const TokenStream &Tokens, const ParseParams &Params,
-                           SymbolID StartSymbol) {
-  assert(isNonterminal(StartSymbol) && "Start symbol must be a nonterminal");
-  llvm::ArrayRef<ForestNode> Terminals = Params.Forest.createTerminals(Tokens);
-  auto &G = Params.G;
-  (void)G;
-  auto &GSS = Params.GSStack;
-
-  // Lists of active shift, reduce actions.
-  std::vector<ParseStep> PendingShift, PendingReduce;
-  auto AddSteps = [&](const GSS::Node *Head, SymbolID NextTok) {
-    for (const auto &Action : Params.Table.getActions(Head->State, NextTok)) {
-      switch (Action.kind()) {
-      case LRTable::Action::Shift:
-        PendingShift.push_back({Head, Action});
-        break;
-      case LRTable::Action::Reduce:
-        PendingReduce.push_back({Head, Action});
-        break;
-      default:
-        llvm_unreachable("unexpected action kind!");
-      }
-    }
-  };
-  StateID StartState = Params.Table.getStartState(StartSymbol);
-  std::vector<const GSS::Node *> NewHeads = {
-      GSS.addNode(/*State=*/StartState,
-                  /*ForestNode=*/nullptr, {})};
-  auto MaybeGC = [&, Roots(std::vector<const GSS::Node *>{}), I(0u)]() mutable {
-    assert(PendingShift.empty() && PendingReduce.empty() &&
-           "Running GC at the wrong time!");
-
-    if (++I != 20) // Run periodically to balance CPU and memory usage.
-      return;
-    I = 0;
-
-    // We need to copy the list: Roots is consumed by the GC.
-    Roots = NewHeads;
-    GSS.gc(std::move(Roots));
-  };
-  for (const ForestNode &Terminal : Terminals) {
-    LLVM_DEBUG(llvm::dbgs() << llvm::formatv("Next token {0} (id={1})\n",
-                                             G.symbolName(Terminal.symbol()),
-                                             Terminal.symbol()));
-    for (const auto *Head : NewHeads)
-      AddSteps(Head, Terminal.symbol());
-    NewHeads.clear();
-    glrReduce(PendingReduce, Params,
-              [&](const GSS::Node * NewHead) {
-                // A reduce will enable more steps.
- AddSteps(NewHead, Terminal.symbol()); - }); - - glrShift(PendingShift, Terminal, Params, - [&](const GSS::Node *NewHead) { NewHeads.push_back(NewHead); }); - MaybeGC(); - } - LLVM_DEBUG(llvm::dbgs() << llvm::formatv("Next is eof\n")); - for (const auto *Heads : NewHeads) - AddSteps(Heads, tokenSymbol(tok::eof)); - - StateID AcceptState = Params.Table.getGoToState(StartState, StartSymbol); - // Collect new heads created from the final reduce. - std::vector Heads; - glrReduce(PendingReduce, Params, [&](const GSS::Node *NewHead) { - Heads.push_back(NewHead); - // A reduce will enable more steps. - AddSteps(NewHead, tokenSymbol(tok::eof)); - }); - - const ForestNode *Result = nullptr; - for (const auto *Head : Heads) { - if (Head->State == AcceptState) { - assert(Head->Payload->symbol() == StartSymbol); - assert(Result == nullptr && "multiple results!"); - Result = Head->Payload; - } - } - if (Result) - return *Result; - // We failed to parse the input, returning an opaque forest node for recovery. - // - // FIXME: We will need to invoke our generic error-recovery handlers when we - // reach EOF without reaching accept state, and involving the eof - // token in the above main for-loopmay be the best way to reuse the code). - return Params.Forest.createOpaque(StartSymbol, /*Token::Index=*/0); -} - // Apply all pending shift actions. // In theory, LR parsing doesn't have shift/shift conflicts on a single head. // But we may have multiple active heads, and each head has a shift action. @@ -138,42 +49,40 @@ const ForestNode &glrParse(const TokenStream &Tokens, const ParseParams &Params, // After the shift action, the GSS is: // 0---1---2---4 // └---3---┘ -void glrShift(std::vector &PendingShift, const ForestNode &NewTok, - const ParseParams &Params, NewHeadCallback NewHeadCB) { +void glrShift(llvm::ArrayRef OldHeads, + const ForestNode &NewTok, const ParseParams &Params, + std::vector &NewHeads) { assert(NewTok.kind() == ForestNode::Terminal); - assert(llvm::all_of(PendingShift, - [](const ParseStep &Step) { - return Step.Action.kind() == LRTable::Action::Shift; - }) && - "Pending shift actions must be shift actions"); LLVM_DEBUG(llvm::dbgs() << llvm::formatv(" Shift {0} ({1} active heads):\n", Params.G.symbolName(NewTok.symbol()), - PendingShift.size())); + OldHeads.size())); // We group pending shifts by their target state so we can merge them. - llvm::stable_sort(PendingShift, [](const ParseStep &L, const ParseStep &R) { - return L.Action.getShiftState() < R.Action.getShiftState(); - }); - auto Rest = llvm::makeArrayRef(PendingShift); + llvm::SmallVector, 8> Shifts; + for (const auto *H : OldHeads) + if (auto S = Params.Table.getShiftState(H->State, NewTok.symbol())) + Shifts.push_back({*S, H}); + llvm::stable_sort(Shifts, llvm::less_first{}); + + auto Rest = llvm::makeArrayRef(Shifts); llvm::SmallVector Parents; while (!Rest.empty()) { // Collect the batch of PendingShift that have compatible shift states. // Their heads become TempParents, the parents of the new GSS node. 
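The batching loop that continues below is the heart of the new glrShift: (target state, old head) pairs are sorted by state, and each run of equal states becomes one new GSS node whose parents are all the heads in the run. The grouping step in isolation, with toy types rather than the pseudoparser's:

```c++
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

struct Node { int Id; };

int main() {
  Node A{1}, B{2}, C{3};
  // (TargetState, Parent) pairs, as getShiftState() would produce per head.
  std::vector<std::pair<int, const Node *>> Shifts = {
      {4, &A}, {7, &B}, {4, &C}};
  std::stable_sort(
      Shifts.begin(), Shifts.end(),
      [](const auto &L, const auto &R) { return L.first < R.first; });
  for (size_t I = 0; I < Shifts.size();) {
    size_t J = I;
    while (J < Shifts.size() && Shifts[J].first == Shifts[I].first)
      ++J;
    // One merged head per distinct state: state 4 gets parents A and C.
    std::cout << "state " << Shifts[I].first << ": " << (J - I)
              << " parent(s)\n";
    I = J;
  }
}
```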
- StateID NextState = Rest.front().Action.getShiftState(); + StateID NextState = Rest.front().first; Parents.clear(); for (const auto &Base : Rest) { - if (Base.Action.getShiftState() != NextState) + if (Base.first != NextState) break; - Parents.push_back(Base.Head); + Parents.push_back(Base.second); } Rest = Rest.drop_front(Parents.size()); LLVM_DEBUG(llvm::dbgs() << llvm::formatv(" --> S{0} ({1} heads)\n", NextState, Parents.size())); - NewHeadCB(Params.GSStack.addNode(NextState, &NewTok, Parents)); + NewHeads.push_back(Params.GSStack.addNode(NextState, &NewTok, Parents)); } - PendingShift.clear(); } namespace { @@ -187,7 +96,6 @@ template void sortAndUnique(std::vector &Vec) { llvm::sort(Vec); Vec.erase(std::unique(Vec.begin(), Vec.end()), Vec.end()); } -} // namespace // Perform reduces until no more are possible. // @@ -231,8 +139,12 @@ template void sortAndUnique(std::vector &Vec) { // After reducing 3 by `pointer := class-name STAR` and // 2 by`enum-name := class-name STAR`: // 0--5(pointer) // 5 is goto(0, pointer) -void glrReduce(std::vector &PendingReduce, const ParseParams &Params, - NewHeadCallback NewHeadCB) { +// +// (This is a functor rather than a function to allow it to reuse scratch +// storage across calls). +class GLRReduce { + const ParseParams &Params; + // There are two interacting complications: // 1. Performing one reduce can unlock new reduces on the newly-created head. // 2a. The ambiguous ForestNodes must be complete (have all sequence nodes). @@ -280,55 +192,119 @@ void glrReduce(std::vector &PendingReduce, const ParseParams &Params, }; // A sequence is the ForestNode payloads of the GSS nodes we are reducing. - // These are the RHS of the rule, the RuleID is stored in the Family. - // They specify a sequence ForestNode we may build (but we dedup first). using Sequence = llvm::SmallVector; + // Like ArrayRef, but with the missing operator<. + // (Sequences are big to move by value as the collections gets rearranged). + struct SequenceRef { + SequenceRef(const Sequence &S) : S(S) {} + llvm::ArrayRef S; + friend bool operator==(SequenceRef A, SequenceRef B) { return A.S == B.S; } + friend bool operator<(const SequenceRef &A, const SequenceRef &B) { + return std::lexicographical_compare(A.S.begin(), A.S.end(), B.S.begin(), + B.S.end()); + } + }; + // Underlying storage for sequences pointed to by stored SequenceRefs. + std::deque SequenceStorage; + // We don't actually destroy the sequences between calls, to reuse storage. + // Everything SequenceStorage[ >=SequenceStorageCount ] is reusable scratch. + unsigned SequenceStorageCount; + + // Halfway through a reduction (after the pop, before the push), we have + // collected nodes for the RHS of a rule, and reached a base node. + // They specify a sequence ForestNode we may build (but we dedup first). + // (The RuleID is not stored here, but rather in the Family). struct PushSpec { - // A base node is the head after popping the GSS nodes we are reducing. - const GSS::Node* Base = nullptr; - Sequence Seq; + // The last node popped before pushing. Its parent is the reduction base(s). + // (Base is more fundamental, but this is cheaper to store). + const GSS::Node* LastPop = nullptr; + Sequence *Seq = nullptr; }; - KeyedQueue Sequences; + KeyedQueue Sequences; // FIXME: rename => PendingPushes? + + // We treat Heads as a queue of Pop operations still to be performed. + // PoppedHeads is our position within it. 
+ std::vector *Heads; + unsigned NextPopHead; + SymbolID Lookahead; Sequence TempSequence; - // Pop walks up the parent chain(s) for a reduction from Head by to Rule. +public: + GLRReduce(const ParseParams &Params) : Params(Params) {} + + void operator()(std::vector &Heads, SymbolID Lookahead) { + assert(isToken(Lookahead)); + + NextPopHead = 0; + this->Heads = &Heads; + this->Lookahead = Lookahead; + assert(Sequences.empty()); + SequenceStorageCount = 0; + + popPending(); + while (!Sequences.empty()) { + pushNext(); + popPending(); + } + } + +private: + // pop walks up the parent chain(s) for a reduction from Head by to Rule. // Once we reach the end, record the bases and sequences. - auto Pop = [&](const GSS::Node *Head, RuleID RID) { + void pop(const GSS::Node *Head, RuleID RID) { LLVM_DEBUG(llvm::dbgs() << " Pop " << Params.G.dumpRule(RID) << "\n"); const auto &Rule = Params.G.lookupRule(RID); Family F{/*Start=*/0, /*Symbol=*/Rule.Target, /*Rule=*/RID}; TempSequence.resize_for_overwrite(Rule.Size); auto DFS = [&](const GSS::Node *N, unsigned I, auto &DFS) { - if (I == Rule.Size) { + TempSequence[Rule.Size - 1 - I] = N->Payload; + if (I + 1 == Rule.Size) { F.Start = TempSequence.front()->startTokenIndex(); - LLVM_DEBUG(llvm::dbgs() << " --> base at S" << N->State << "\n"); - Sequences.emplace(F, PushSpec{N, TempSequence}); + LLVM_DEBUG({ + for (const auto *B : N->parents()) + llvm::dbgs() << " --> base at S" << B->State << "\n"; + }); + + // Copy the chain to stable storage so it can be enqueued. + if (SequenceStorageCount == SequenceStorage.size()) + SequenceStorage.emplace_back(); + SequenceStorage[SequenceStorageCount] = TempSequence; + Sequence *Seq = &SequenceStorage[SequenceStorageCount++]; + + Sequences.emplace(F, PushSpec{N, Seq}); return; } - TempSequence[Rule.Size - 1 - I] = N->Payload; for (const GSS::Node *Parent : N->parents()) DFS(Parent, I + 1, DFS); }; DFS(Head, 0, DFS); - }; - auto PopPending = [&] { - for (const ParseStep &Pending : PendingReduce) - Pop(Pending.Head, Pending.Action.getReduceRule()); - PendingReduce.clear(); - }; - - std::vector> FamilyBases; - std::vector> FamilySequences; + } - std::vector TempGSSNodes; - std::vector TempForestNodes; + // popPending pops every available reduction. + void popPending() { + for (; NextPopHead < Heads->size(); ++NextPopHead) { + // In trivial cases, we perform the complete reduce here! + if (popAndPushTrivial()) + continue; + for (const auto &A : + Params.Table.getActions((*Heads)[NextPopHead]->State, Lookahead)) { + if (A.kind() != LRTable::Action::Reduce) + continue; + pop((*Heads)[NextPopHead], A.getReduceRule()); + } + } + } - // Main reduction loop: - // - pop as much as we can - // - process one family at a time, forming a forest node - // - produces new GSS heads which may enable more pops - PopPending(); - while (!Sequences.empty()) { + // Storage reused by each call to pushNext. + std::vector> FamilyBases; + std::vector> FamilySequences; + std::vector Parents; + std::vector SequenceNodes; + + // Process one push family, forming a forest node. + // This produces new GSS heads which may enable more pops. 
+ + // Process one push family, forming a forest node. + // This produces new GSS heads which may enable more pops. + void pushNext() { + assert(!Sequences.empty()); Family F = Sequences.top().first; LLVM_DEBUG(llvm::dbgs() << " Push " << Params.G.symbolName(F.Symbol) @@ -341,22 +317,20 @@ void glrReduce(std::vector<ParseStep> &PendingReduce, const ParseParams &Params, FamilySequences.clear(); FamilyBases.clear(); do { - FamilySequences.emplace_back(Sequences.top().first.Rule, - Sequences.top().second.Seq); - FamilyBases.emplace_back( - Params.Table.getGoToState(Sequences.top().second.Base->State, - F.Symbol), - Sequences.top().second.Base); + const PushSpec &Push = Sequences.top().second; + FamilySequences.emplace_back(Sequences.top().first.Rule, *Push.Seq); + for (const GSS::Node *Base : Push.LastPop->parents()) + FamilyBases.emplace_back( + Params.Table.getGoToState(Base->State, F.Symbol), Base); Sequences.pop(); } while (!Sequences.empty() && Sequences.top().first == F); // Build a forest node for each unique sequence. sortAndUnique(FamilySequences); - auto &SequenceNodes = TempForestNodes; SequenceNodes.clear(); for (const auto &SequenceSpec : FamilySequences) SequenceNodes.push_back(&Params.Forest.createSequence( - F.Symbol, SequenceSpec.first, SequenceSpec.second)); + F.Symbol, SequenceSpec.first, SequenceSpec.second.S)); // Wrap in an ambiguous node if needed. const ForestNode *Parsed = SequenceNodes.size() == 1 @@ -370,7 +344,6 @@ void glrReduce(std::vector<ParseStep> &PendingReduce, const ParseParams &Params, llvm::ArrayRef<std::pair<StateID, const GSS::Node *>> BasesLeft = FamilyBases; while (!BasesLeft.empty()) { StateID NextState = BasesLeft.front().first; - auto &Parents = TempGSSNodes; Parents.clear(); for (const auto &Base : BasesLeft) { if (Base.first != NextState) @@ -378,14 +351,121 @@ void glrReduce(std::vector<ParseStep> &PendingReduce, const ParseParams &Params, Parents.push_back(Base.second); } BasesLeft = BasesLeft.drop_front(Parents.size()); + Heads->push_back(Params.GSStack.addNode(NextState, Parsed, Parents)); + } + } - // Invoking the callback for new heads, a real GLR parser may add new - // reduces to the PendingReduce queue! - NewHeadCB(Params.GSStack.addNode(NextState, Parsed, Parents)); + // In general we split a reduce into a pop/push, so concurrently-available + // reductions can run in the correct order. The data structures are expensive. + // + // When only one reduction is possible at a time, we can skip this: + // we pop and immediately push, as an LR parser (as opposed to GLR) would. + // This is valid whenever there's only one concurrent PushSpec. + // + // This function handles a trivial but common subset of these cases: + // - there must be no pending pushes, and only one poppable head + // - the head must have only one reduction rule + // - the reduction path must be a straight line (no multiple parents) + // (Roughly this means there's no local ambiguity, so the LR algorithm works). + bool popAndPushTrivial() { + if (!Sequences.empty() || Heads->size() != NextPopHead + 1) + return false; + const GSS::Node *Head = Heads->back(); + llvm::Optional<RuleID> RID; + for (auto &A : Params.Table.getActions(Head->State, Lookahead)) { + if (A.kind() != LRTable::Action::Reduce) + continue; + if (RID) + return false; + RID = A.getReduceRule(); } - PopPending(); + if (!RID) + return true; // no reductions available, but we've processed the head!
+ const auto &Rule = Params.G.lookupRule(*RID); + const GSS::Node *Base = Head; + TempSequence.resize_for_overwrite(Rule.Size); + for (unsigned I = 0; I < Rule.Size; ++I) { + if (Base->parents().size() != 1) + return false; + TempSequence[Rule.Size - 1 - I] = Base->Payload; + Base = Base->parents().front(); + } + const ForestNode *Parsed = + &Params.Forest.createSequence(Rule.Target, *RID, TempSequence); + StateID NextState = Params.Table.getGoToState(Base->State, Rule.Target); + Heads->push_back(Params.GSStack.addNode(NextState, Parsed, {Base})); + return true; } - assert(Sequences.empty()); +}; + +} // namespace + +const ForestNode &glrParse(const TokenStream &Tokens, const ParseParams &Params, SymbolID StartSymbol) { + GLRReduce Reduce(Params); + assert(isNonterminal(StartSymbol) && "Start symbol must be a nonterminal"); + llvm::ArrayRef<ForestNode> Terminals = Params.Forest.createTerminals(Tokens); + auto &G = Params.G; + (void)G; + auto &GSS = Params.GSStack; + + StateID StartState = Params.Table.getStartState(StartSymbol); + // Heads correspond to the parse of tokens [0, I), NextHeads to [0, I+1). + std::vector<const GSS::Node *> Heads = {GSS.addNode(/*State=*/StartState, + /*ForestNode=*/nullptr, + {})}; + std::vector<const GSS::Node *> NextHeads; + auto MaybeGC = [&, Roots(std::vector<const GSS::Node *>{}), I(0u)]() mutable { + assert(NextHeads.empty() && "Running GC at the wrong time!"); + if (++I != 20) // Run periodically to balance CPU and memory usage. + return; + I = 0; + + // We need to copy the list: Roots is consumed by the GC. + Roots = Heads; + GSS.gc(std::move(Roots)); + }; + // Each iteration fully processes a single token. + for (unsigned I = 0; I < Terminals.size(); ++I) { + LLVM_DEBUG(llvm::dbgs() << llvm::formatv( "Next token {0} (id={1})\n", G.symbolName(Terminals[I].symbol()), Terminals[I].symbol())); + // Consume the token. + glrShift(Heads, Terminals[I], Params, NextHeads); + // Form nonterminals containing the token we just consumed. + SymbolID Lookahead = I + 1 == Terminals.size() ? tokenSymbol(tok::eof) : Terminals[I + 1].symbol(); + Reduce(NextHeads, Lookahead); + // Prepare for the next token. + std::swap(Heads, NextHeads); + NextHeads.clear(); + MaybeGC(); + } + LLVM_DEBUG(llvm::dbgs() << llvm::formatv("Reached eof\n")); + + StateID AcceptState = Params.Table.getGoToState(StartState, StartSymbol); + const ForestNode *Result = nullptr; + for (const auto *Head : Heads) { + if (Head->State == AcceptState) { + assert(Head->Payload->symbol() == StartSymbol); + assert(Result == nullptr && "multiple results!"); + Result = Head->Payload; + } + } + if (Result) + return *Result; + // We failed to parse the input, returning an opaque forest node for recovery. + // + // FIXME: We will need to invoke our generic error-recovery handlers when we + // reach EOF without reaching accept state, and involving the eof + // token in the above main for-loop may be the best way to reuse the code. + return Params.Forest.createOpaque(StartSymbol, /*Token::Index=*/0); +}
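For orientation, here is a sketch of how the new glrParse entry point is driven end to end. Everything outside glrParse itself is assumed from elsewhere in clang-pseudo at this revision (Grammar::parseBNF, LRTable::buildSLR, the lex/cook token helpers, findNonterminal), so treat the names and signatures as illustrative rather than authoritative:

```cpp
#include "clang-pseudo/GLR.h"
#include "clang-pseudo/Token.h"
#include "clang-pseudo/grammar/Grammar.h"
#include "clang-pseudo/grammar/LRTable.h"
#include "clang/Basic/LangOptions.h"
#include <iostream>
#include <string>
#include <vector>

using namespace clang::pseudo;

int main() {
  // A toy grammar; "_ :=" marks the start symbol by convention.
  std::vector<std::string> Diags;
  auto G = Grammar::parseBNF("_ := expr\nexpr := IDENTIFIER", Diags);
  LRTable Table = LRTable::buildSLR(*G); // assumed table builder

  // Lex and cook the input; Source must outlive the TokenStream. Real callers
  // also strip comments before parsing.
  clang::LangOptions LangOpts;
  std::string Source = "x";
  TokenStream Toks = cook(lex(Source, LangOpts), LangOpts);

  // Arena and GSS own the forest and stack nodes produced by the parse.
  ForestArena Arena;
  GSS GSStack;
  const ForestNode &Root = glrParse(Toks, ParseParams{*G, Table, Arena, GSStack},
                                    *G->findNonterminal("expr"));
  std::cout << Root.dumpRecursive(*G);
}
```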
+ +void glrReduce(std::vector<const GSS::Node *> &Heads, SymbolID Lookahead, + const ParseParams &Params) { + // Create a new GLRReduce each time for tests; performance doesn't matter. + GLRReduce{Params}(Heads, Lookahead); } const GSS::Node *GSS::addNode(LRTable::StateID State, const ForestNode *Symbol, diff --git a/clang-tools-extra/pseudo/lib/grammar/LRTable.cpp b/clang-tools-extra/pseudo/lib/grammar/LRTable.cpp index 016949df5e640fcc34205cdc35574a639ddebf62..1f700e53a92f24bf2cae00df44f02d5001beb070 100644 --- a/clang-tools-extra/pseudo/lib/grammar/LRTable.cpp +++ b/clang-tools-extra/pseudo/lib/grammar/LRTable.cpp @@ -72,6 +72,17 @@ std::string LRTable::dumpForTests(const Grammar &G) const { return OS.str(); } +llvm::Optional<LRTable::StateID> +LRTable::getShiftState(StateID State, SymbolID Terminal) const { + // FIXME: we spend a significant amount of time on misses here. + // We could consider storing a std::bitset for a cheaper test? + assert(pseudo::isToken(Terminal) && "expected terminal symbol!"); + for (const auto &Result : getActions(State, Terminal)) + if (Result.kind() == Action::Shift) + return Result.getShiftState(); // unique: no shift/shift conflicts. + return llvm::None; +} + llvm::ArrayRef<LRTable::Action> LRTable::getActions(StateID State, SymbolID Terminal) const { assert(pseudo::isToken(Terminal) && "expect terminal symbol!"); diff --git a/clang-tools-extra/pseudo/lib/grammar/LRTableBuild.cpp b/clang-tools-extra/pseudo/lib/grammar/LRTableBuild.cpp index 69251142ca134b9e7597ff4a321385a39f6e4e03..7d112b8cebfec4090f6d42dcbaee73199d526f37 100644 --- a/clang-tools-extra/pseudo/lib/grammar/LRTableBuild.cpp +++ b/clang-tools-extra/pseudo/lib/grammar/LRTableBuild.cpp @@ -97,8 +97,13 @@ private: LRTable LRTable::buildForTests(const GrammarTable &GT, llvm::ArrayRef<Entry> Entries) { StateID MaxState = 0; - for (const auto &Entry : Entries) + for (const auto &Entry : Entries) { MaxState = std::max(MaxState, Entry.State); + if (Entry.Act.kind() == LRTable::Action::Shift) + MaxState = std::max(MaxState, Entry.Act.getShiftState()); + if (Entry.Act.kind() == LRTable::Action::GoTo) + MaxState = std::max(MaxState, Entry.Act.getGoToState()); + } Builder Build({}); for (const Entry &E : Entries) Build.insert(E); diff --git a/clang-tools-extra/pseudo/test/cxx/declarator-function.cpp b/clang-tools-extra/pseudo/test/cxx/declarator-function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0de4ec14ffcd05aede4deb2dfa175ed4e051c5d6 --- /dev/null +++ b/clang-tools-extra/pseudo/test/cxx/declarator-function.cpp @@ -0,0 +1,11 @@ +// The standard grammar allows an init-list with any declarator, including +// a function declarator. This creates an ambiguity where a function-definition +// is misparsed as a simple-declaration. +// FIXME: eliminate this false parse. +// XFAIL: * + +// RUN: clang-pseudo -grammar=%cxx-bnf-file -source=%s --print-forest | FileCheck %s +void s(){}; +// CHECK-NOT: simple-declaration +// CHECK: function-definition := decl-specifier-seq declarator function-body +// CHECK-NOT: simple-declaration diff --git a/clang-tools-extra/pseudo/test/cxx/declarator-var.cpp b/clang-tools-extra/pseudo/test/cxx/declarator-var.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a5adb43dc3c5cbb19eb5d6470bd9f014e747929a --- /dev/null +++ b/clang-tools-extra/pseudo/test/cxx/declarator-var.cpp @@ -0,0 +1,11 @@ +// The standard grammar allows a function-body to use any declarator, including +// a non-function declarator. This creates an ambiguity where a +// simple-declaration is misparsed as a function-definition. +// FIXME: eliminate this false parse.
+// XFAIL: * + +// RUN: clang-pseudo -grammar=%cxx-bnf-file -source=%s --print-forest | FileCheck %s +void (*s)(){}; +// CHECK-NOT: function-definition +// CHECK: init-declarator := declarator initializer +// CHECK-NOT: function-definition diff --git a/clang-tools-extra/pseudo/unittests/GLRTest.cpp b/clang-tools-extra/pseudo/unittests/GLRTest.cpp index d9555a8d56dd9cb3838fde3143b14c517c521860..6e72f1049878e32877c6a035798f418b6aa9f588 100644 --- a/clang-tools-extra/pseudo/unittests/GLRTest.cpp +++ b/clang-tools-extra/pseudo/unittests/GLRTest.cpp @@ -7,8 +7,8 @@ //===----------------------------------------------------------------------===// #include "clang-pseudo/GLR.h" -#include "clang-pseudo/grammar/Grammar.h" #include "clang-pseudo/Token.h" +#include "clang-pseudo/grammar/Grammar.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/TokenKinds.h" #include "llvm/ADT/StringExtras.h" @@ -31,6 +31,7 @@ namespace { using Action = LRTable::Action; using testing::AllOf; +using testing::UnorderedElementsAre; MATCHER_P(state, StateID, "") { return arg->State == StateID; } MATCHER_P(parsedSymbol, FNode, "") { return arg->Payload == FNode; } @@ -83,17 +84,10 @@ public: return 0; } - NewHeadCallback captureNewHeads() { - return [this](const GSS::Node *NewHead) { - NewHeadResults.push_back(NewHead); - }; - }; - protected: std::unique_ptr<Grammar> G; ForestArena Arena; GSS GSStack; - std::vector<const GSS::Node *> NewHeadResults; }; TEST_F(GLRTest, ShiftMergingHeads) { @@ -109,31 +103,32 @@ TEST_F(GLRTest, ShiftMergingHeads) { // └---3---5 auto *GSSNode0 = GSStack.addNode(/*State=*/0, /*ForestNode=*/nullptr, /*Parents=*/{}); - auto *GSSNode1 = GSStack.addNode(/*State=*/0, /*ForestNode=*/nullptr, + auto *GSSNode1 = GSStack.addNode(/*State=*/1, /*ForestNode=*/nullptr, /*Parents=*/{GSSNode0}); - auto *GSSNode2 = GSStack.addNode(/*State=*/0, /*ForestNode=*/nullptr, + auto *GSSNode2 = GSStack.addNode(/*State=*/2, /*ForestNode=*/nullptr, /*Parents=*/{GSSNode0}); - auto *GSSNode3 = GSStack.addNode(/*State=*/0, /*ForestNode=*/nullptr, + auto *GSSNode3 = GSStack.addNode(/*State=*/3, /*ForestNode=*/nullptr, /*Parents=*/{GSSNode0}); buildGrammar({}, {}); // Create a fake empty grammar. - LRTable T = LRTable::buildForTests(G->table(), /*Entries=*/{}); + LRTable T = + LRTable::buildForTests(G->table(), /*Entries=*/{ + {1, tokenSymbol(tok::semi), Action::shift(4)}, + {2, tokenSymbol(tok::semi), Action::shift(4)}, + {3, tokenSymbol(tok::semi), Action::shift(5)}, + }); ForestNode &SemiTerminal = Arena.createTerminal(tok::semi, 0); - std::vector<ParseStep> PendingShift = { - {GSSNode1, Action::shift(4)}, - {GSSNode3, Action::shift(5)}, - {GSSNode2, Action::shift(4)}, - }; - glrShift(PendingShift, SemiTerminal, {*G, T, Arena, GSStack}, - captureNewHeads()); - - EXPECT_THAT(NewHeadResults, testing::UnorderedElementsAre( - AllOf(state(4), parsedSymbol(&SemiTerminal), - parents({GSSNode1, GSSNode2})), - AllOf(state(5), parsedSymbol(&SemiTerminal), - parents({GSSNode3})))) - << NewHeadResults; + std::vector<const GSS::Node *> NewHeads; + glrShift({GSSNode1, GSSNode2, GSSNode3}, SemiTerminal, + {*G, T, Arena, GSStack}, NewHeads); + + EXPECT_THAT(NewHeads, + UnorderedElementsAre(AllOf(state(4), parsedSymbol(&SemiTerminal), + parents({GSSNode1, GSSNode2})), + AllOf(state(5), parsedSymbol(&SemiTerminal), + parents({GSSNode3})))) + << NewHeads; }
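What the updated test asserts is the merging invariant of the shift step: heads are grouped by the target state of their shift action, and each group becomes a single new GSS node whose parents are all the shifting heads (nodes 1 and 2 merge into one head at state 4; node 3 shifts to state 5 alone). A toy version of that grouping, independent of the parser types (names illustrative):

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  // (target state, shifting head) pairs, as glrShift collects them.
  std::vector<std::pair<int, std::string>> Targets = {
      {4, "node1"}, {5, "node3"}, {4, "node2"}};
  std::sort(Targets.begin(), Targets.end());

  // Group by target state: one new head per state, all parents attached.
  for (std::size_t I = 0; I < Targets.size();) {
    int State = Targets[I].first;
    std::cout << "new head S" << State << " parents:";
    for (; I < Targets.size() && Targets[I].first == State; ++I)
      std::cout << ' ' << Targets[I].second;
    std::cout << '\n';
  }
}
```

This prints one merged head at S4 with parents node1 and node2, and one head at S5 with parent node3, matching the expectations in ShiftMergingHeads.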
TEST_F(GLRTest, ReduceConflictsSplitting) { @@ -147,25 +142,29 @@ TEST_F(GLRTest, ReduceConflictsSplitting) { {"class-name := IDENTIFIER", "enum-name := IDENTIFIER"}); LRTable Table = LRTable::buildForTests( - G->table(), {{/*State=*/0, id("class-name"), Action::goTo(2)}, - {/*State=*/0, id("enum-name"), Action::goTo(3)}}); + G->table(), { + {/*State=*/0, id("class-name"), Action::goTo(2)}, + {/*State=*/0, id("enum-name"), Action::goTo(3)}, + {/*State=*/1, tokenSymbol(tok::l_brace), + Action::reduce(ruleFor("class-name"))}, + {/*State=*/1, tokenSymbol(tok::l_brace), + Action::reduce(ruleFor("enum-name"))}, + }); const auto *GSSNode0 = GSStack.addNode(/*State=*/0, /*ForestNode=*/nullptr, /*Parents=*/{}); const auto *GSSNode1 = - GSStack.addNode(3, &Arena.createTerminal(tok::identifier, 0), {GSSNode0}); - - std::vector<ParseStep> PendingReduce = { - {GSSNode1, Action::reduce(ruleFor("class-name"))}, - {GSSNode1, Action::reduce(ruleFor("enum-name"))}}; - glrReduce(PendingReduce, {*G, Table, Arena, GSStack}, - captureNewHeads()); - EXPECT_THAT(NewHeadResults, - testing::UnorderedElementsAre( - AllOf(state(2), parsedSymbolID(id("class-name")), - parents({GSSNode0})), - AllOf(state(3), parsedSymbolID(id("enum-name")), - parents({GSSNode0})))) << NewHeadResults; + GSStack.addNode(1, &Arena.createTerminal(tok::identifier, 0), {GSSNode0}); + + std::vector<const GSS::Node *> Heads = {GSSNode1}; + glrReduce(Heads, tokenSymbol(tok::l_brace), {*G, Table, Arena, GSStack}); + EXPECT_THAT(Heads, UnorderedElementsAre( + GSSNode1, + AllOf(state(2), parsedSymbolID(id("class-name")), + parents({GSSNode0})), + AllOf(state(3), parsedSymbolID(id("enum-name")), + parents({GSSNode0})))) + << Heads; } TEST_F(GLRTest, ReduceSplittingDueToMultipleBases) { @@ -191,22 +190,25 @@ TEST_F(GLRTest, ReduceSplittingDueToMultipleBases) { LRTable Table = LRTable::buildForTests( G->table(), - {{/*State=*/2, id("ptr-operator"), Action::goTo(/*NextState=*/5)}, - {/*State=*/3, id("ptr-operator"), Action::goTo(/*NextState=*/6)}}); - std::vector<ParseStep> PendingReduce = { - {GSSNode4, Action::reduce(ruleFor("ptr-operator"))}}; - glrReduce(PendingReduce, {*G, Table, Arena, GSStack}, - captureNewHeads()); - - EXPECT_THAT(NewHeadResults, - testing::UnorderedElementsAre( - AllOf(state(5), parsedSymbolID(id("ptr-operator")), - parents({GSSNode2})), - AllOf(state(6), parsedSymbolID(id("ptr-operator")), - parents({GSSNode3})))) << NewHeadResults; + { +
{/*State=*/2, id("ptr-operator"), Action::goTo(/*NextState=*/5)}, + {/*State=*/3, id("ptr-operator"), Action::goTo(/*NextState=*/6)}, + {/*State=*/4, tokenSymbol(tok::identifier), + Action::reduce(ruleFor("ptr-operator"))}, + }); + std::vector<const GSS::Node *> Heads = {GSSNode4}; + glrReduce(Heads, tokenSymbol(tok::identifier), {*G, Table, Arena, GSStack}); + + EXPECT_THAT(Heads, UnorderedElementsAre( + GSSNode4, + AllOf(state(5), parsedSymbolID(id("ptr-operator")), + parents({GSSNode2})), + AllOf(state(6), parsedSymbolID(id("ptr-operator")), + parents({GSSNode3})))) + << Heads; // Verify that the payload of the two new heads is shared; only a single // ptr-operator node is created in the forest. - EXPECT_EQ(NewHeadResults[0]->Payload, NewHeadResults[1]->Payload); + EXPECT_EQ(Heads[1]->Payload, Heads[2]->Payload); } TEST_F(GLRTest, ReduceJoiningWithMultipleBases) { @@ -238,28 +240,28 @@ TEST_F(GLRTest, ReduceJoiningWithMultipleBases) { GSStack.addNode(/*State=*/4, /*ForestNode=*/EnumNameNode, /*Parents=*/{GSSNode2}); + // FIXME: figure out a way to get rid of the hard-coded reduce RuleID! LRTable Table = LRTable::buildForTests( G->table(), - {{/*State=*/1, id("type-name"), Action::goTo(/*NextState=*/5)}, - {/*State=*/2, id("type-name"), Action::goTo(/*NextState=*/5)}}); - // FIXME: figure out a way to get rid of the hard-coded reduce RuleID! - std::vector<ParseStep> PendingReduce = { { - GSSNode3, Action::reduce(/*RuleID=*/0) // type-name := class-name - }, - { - GSSNode4, Action::reduce(/*RuleID=*/1) // type-name := enum-name - }}; - glrReduce(PendingReduce, {*G, Table, Arena, GSStack}, - captureNewHeads()); + {/*State=*/1, id("type-name"), Action::goTo(/*NextState=*/5)}, + {/*State=*/2, id("type-name"), Action::goTo(/*NextState=*/5)}, + {/*State=*/3, tokenSymbol(tok::l_paren), + Action::reduce(/* type-name := class-name */ 0)}, + {/*State=*/4, tokenSymbol(tok::l_paren), + Action::reduce(/* type-name := enum-name */ 1)}, + }); + std::vector<const GSS::Node *> Heads = {GSSNode3, GSSNode4}; + glrReduce(Heads, tokenSymbol(tok::l_paren), {*G, Table, Arena, GSStack}); // Verify that the stack heads are joined at state 5 after reduces. - EXPECT_THAT(NewHeadResults, testing::UnorderedElementsAre(AllOf( - state(5), parsedSymbolID(id("type-name")), - parents({GSSNode1, GSSNode2})))) - << NewHeadResults; + EXPECT_THAT(Heads, UnorderedElementsAre(GSSNode3, GSSNode4, + AllOf(state(5), + parsedSymbolID(id("type-name")), + parents({GSSNode1, GSSNode2})))) + << Heads; // Verify that we create an ambiguous ForestNode of two parses of `type-name`. - EXPECT_EQ(NewHeadResults.front()->Payload->dumpRecursive(*G), + EXPECT_EQ(Heads.back()->Payload->dumpRecursive(*G), "[ 1, end) type-name := <ambiguous>\n" "[ 1, end) ├─type-name := class-name\n" "[ 1, end) │ └─class-name := <opaque>\n" @@ -296,24 +298,24 @@ TEST_F(GLRTest, ReduceJoiningWithSameBase) { GSStack.addNode(/*State=*/4, /*ForestNode=*/StartTerminal, /*Parents=*/{GSSNode2}); - LRTable Table = LRTable::buildForTests( - G->table(), {{/*State=*/0, id("pointer"), Action::goTo(5)}}); // FIXME: figure out a way to get rid of the hard-coded reduce RuleID!
- std::vector<ParseStep> PendingReduce = { - { - GSSNode3, Action::reduce(/*RuleID=*/0) // pointer := class-name * - }, - { - GSSNode4, Action::reduce(/*RuleID=*/1) // pointer := enum-name * - }}; - glrReduce(PendingReduce, {*G, Table, Arena, GSStack}, - captureNewHeads()); - - EXPECT_THAT(NewHeadResults, testing::UnorderedElementsAre( + LRTable Table = LRTable::buildForTests( + G->table(), { + {/*State=*/0, id("pointer"), Action::goTo(5)}, + {3, tokenSymbol(tok::l_paren), + Action::reduce(/* pointer := class-name */ 0)}, + {4, tokenSymbol(tok::l_paren), + Action::reduce(/* pointer := enum-name */ 1)}, + }); + std::vector<const GSS::Node *> Heads = {GSSNode3, GSSNode4}; + glrReduce(Heads, tokenSymbol(tok::l_paren), {*G, Table, Arena, GSStack}); + + EXPECT_THAT( + Heads, UnorderedElementsAre(GSSNode3, GSSNode4, AllOf(state(5), parsedSymbolID(id("pointer")), parents({GSSNode0})))) - << NewHeadResults; - EXPECT_EQ(NewHeadResults.front()->Payload->dumpRecursive(*G), + << Heads; + EXPECT_EQ(Heads.back()->Payload->dumpRecursive(*G), "[ 0, end) pointer := <ambiguous>\n" "[ 0, end) ├─pointer := class-name *\n" "[ 0, 1) │ ├─class-name := <opaque>\n" diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix.h b/clang-tools-extra/test/clang-tidy/checkers/Inputs/Headers/integral_constant.h similarity index 86% rename from clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix.h rename to clang-tools-extra/test/clang-tidy/checkers/Inputs/Headers/integral_constant.h index 680321e734cc7005744994ef2da12902d3b75df3..225bf44ca1e5b3d1342c7cde0902a921e3d9b099 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix.h +++ b/clang-tools-extra/test/clang-tidy/checkers/Inputs/Headers/integral_constant.h @@ -1,3 +1,6 @@ +#ifndef _INTEGRAL_CONSTANT_H_ +#define _INTEGRAL_CONSTANT_H_ + template <typename T, T v> struct integral_constant { static constexpr T value = v; @@ -14,3 +17,5 @@ struct is_same : false_type {}; template <typename T> struct is_same<T, T> : true_type {}; + +#endif diff --git a/clang-tools-extra/test/clang-tidy/checkers/cert/uppercase-literal-suffix-integer.cpp b/clang-tools-extra/test/clang-tidy/checkers/cert/uppercase-literal-suffix-integer.cpp index d6a0d56019d4fe8fe68781112d0abd25d82a5b4e..0dc06df4f18b4fd69780304fc4a4c5415d7acb96 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/cert/uppercase-literal-suffix-integer.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/cert/uppercase-literal-suffix-integer.cpp @@ -1,9 +1,9 @@ -// RUN: %check_clang_tidy %s cert-dcl16-c %t -- -- -I %S +// RUN: %check_clang_tidy %s cert-dcl16-c %t -- -- -I %clang_tidy_headers // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,cert-dcl16-c' -fix -- -I %S -// RUN: clang-tidy %t.cpp -checks='-*,cert-dcl16-c' -warnings-as-errors='-*,cert-dcl16-c' -- -I %S +// RUN: clang-tidy %t.cpp -checks='-*,cert-dcl16-c' -fix -- -I %clang_tidy_headers +// RUN: clang-tidy %t.cpp -checks='-*,cert-dcl16-c' -warnings-as-errors='-*,cert-dcl16-c' -- -I %clang_tidy_headers -#include "../readability/uppercase-literal-suffix.h" +#include "integral_constant.h" void integer_suffix() { static constexpr auto v0 = __LINE__; // synthetic diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp index fcf558dcac8e6ce6c54bfcd7119a26335a940023..61e565075b15d21a531609b0a5f30f18a109a2ce 100644 ---
a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp @@ -272,6 +272,7 @@ DerivedFromTemplateNonVirtualBaseStruct2Typedef InstantiationWithPublicNonVirtua } // namespace Bugzilla_51912 namespace macro_tests { +#define MY_VIRTUAL virtual #define CONCAT(x, y) x##y // CHECK-MESSAGES: :[[@LINE+2]]:7: warning: destructor of 'FooBar1' is protected and virtual [cppcoreguidelines-virtual-class-destructor] @@ -317,8 +318,17 @@ class FooBar5 { protected: XMACRO(CONCAT(vir, tual), ~CONCAT(Foo, Bar5());) // no-crash, no-fixit }; + +// CHECK-MESSAGES: :[[@LINE+2]]:7: warning: destructor of 'FooBar6' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: :[[@LINE+1]]:7: note: make it protected and non-virtual +class FooBar6 { +protected: + MY_VIRTUAL ~FooBar6(); // FIXME: We should have a fixit for this. +}; + #undef XMACRO #undef CONCAT +#undef MY_VIRTUAL } // namespace macro_tests namespace FinalClassCannotBeBaseClass { diff --git a/clang-tools-extra/test/clang-tidy/checkers/google/module.cpp b/clang-tools-extra/test/clang-tidy/checkers/google/module.cpp index 2c82237e4186d757de0b0312e25468a11624754d..d987e71ebafd22d1d4ecb35236b20edcc6bee959 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/google/module.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/google/module.cpp @@ -1,6 +1,6 @@ // RUN: clang-tidy -checks='-*,google*' -config='{}' -dump-config - -- | FileCheck %s // CHECK: CheckOptions: -// CHECK-DAG: {{- key: *google-readability-braces-around-statements.ShortStatementLines *[[:space:]] *value: *'1'}} -// CHECK-DAG: {{- key: *google-readability-function-size.StatementThreshold *[[:space:]] *value: *'800'}} -// CHECK-DAG: {{- key: *google-readability-namespace-comments.ShortNamespaceLines *[[:space:]] *value: *'10'}} -// CHECK-DAG: {{- key: *google-readability-namespace-comments.SpacesBeforeComments *[[:space:]] *value: *'2'}} +// CHECK-DAG: {{google-readability-braces-around-statements.ShortStatementLines: '1'}} +// CHECK-DAG: {{google-readability-function-size.StatementThreshold: '800'}} +// CHECK-DAG: {{google-readability-namespace-comments.ShortNamespaceLines: '10'}} +// CHECK-DAG: {{google-readability-namespace-comments.SpacesBeforeComments: '2'}} diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-float16.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-float16.cpp index 89c10488069e42ddc11ef86388c6353f0226b2eb..a790597fcff51d7a6e60a1f950810316f9bc7c9f 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-float16.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-float16.cpp @@ -1,6 +1,6 @@ -// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target aarch64-linux-gnu -I %S +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target aarch64-linux-gnu -I %clang_tidy_headers -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void float16_normal_literals() { // _Float16 diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-floating-point.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-floating-point.cpp index 27e4bd8e21441499ae9e5c1e649c4618e1cb8d43..a0dcc38c8b23114985db38221fb4e7abcfbdacd1 100644 --- 
a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-floating-point.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-floating-point.cpp @@ -1,9 +1,9 @@ -// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %S +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %S -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %S +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void floating_point_suffix() { static constexpr auto v0 = 1.; // no literal diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-hexadecimal-floating-point.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-hexadecimal-floating-point.cpp index 9f60abe740af7da2b8ec8ba697c2f23ce8fcb453..57d24fb5712c4f4af0f2046f71d7d06801db813f 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-hexadecimal-floating-point.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-hexadecimal-floating-point.cpp @@ -1,9 +1,9 @@ -// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %S +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %S -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %S +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void floating_point_suffix() { static constexpr auto v0 = 0x0p0; // no literal diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-custom-list.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-custom-list.cpp index 9e83ce2c94f0d11be87473d04f6f0fb1fba01532..c787cab1fc4a758dfab221966e9d0dbb3074caba 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-custom-list.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-custom-list.cpp @@ -1,9 +1,9 @@ -// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- 
-config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %S +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %clang_tidy_headers // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %S -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %S +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %clang_tidy_headers +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -config="{CheckOptions: [{key: readability-uppercase-literal-suffix.NewSuffixes, value: 'L;uL'}]}" -- -I %clang_tidy_headers -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void integer_suffix() { // Unsigned diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-ms.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-ms.cpp index fb8dd23a5365169f8feb6d8d4eca0b46fc819c0d..fe3269f2c506d386a88860a7d95b5daaee6da344 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-ms.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer-ms.cpp @@ -1,9 +1,9 @@ -// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %S -fms-extensions +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers -fms-extensions // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %S -fms-extensions -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %S -fms-extensions +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers -fms-extensions +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -target x86_64-pc-linux-gnu -I %clang_tidy_headers -fms-extensions -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void integer_suffix() { static constexpr auto v0 = __LINE__; // synthetic diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer.cpp index f4fdf29f1d73b93010e0663b44831c46a8101d27..c03ccc23992fc9bfa643b1f52521d5195f8f7a28 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/uppercase-literal-suffix-integer.cpp @@ -1,9 +1,9 @@ -// RUN: 
%check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -I %S +// RUN: %check_clang_tidy %s readability-uppercase-literal-suffix %t -- -- -I %clang_tidy_headers // RUN: grep -Ev "// *[A-Z-]+:" %s > %t.cpp -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -I %S -// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -I %S +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -fix -- -I %clang_tidy_headers +// RUN: clang-tidy %t.cpp -checks='-*,readability-uppercase-literal-suffix' -warnings-as-errors='-*,readability-uppercase-literal-suffix' -- -I %clang_tidy_headers -#include "uppercase-literal-suffix.h" +#include "integral_constant.h" void integer_suffix() { static constexpr auto v0 = __LINE__; // synthetic diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/config-files/4/key-dict/.clang-tidy b/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/config-files/4/key-dict/.clang-tidy new file mode 100644 index 0000000000000000000000000000000000000000..3abef4360d8d6ca085755c6f9cf8a612b54cde11 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/config-files/4/key-dict/.clang-tidy @@ -0,0 +1,7 @@ +InheritParentConfig: true +Checks: 'llvm-qualified-auto' +CheckOptions: + modernize-loop-convert.MaxCopySize: '20' + llvm-qualified-auto.AddConstToQualified: 'true' + IgnoreMacros: 'false' + diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/config-files.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/config-files.cpp index d708ec8777c9a12f53b6140d6c3d0a1957123f52..6c42bd7f495f7893c06d5ae78becbed9bdc5b7b1 100644 --- a/clang-tools-extra/test/clang-tidy/infrastructure/config-files.cpp +++ b/clang-tools-extra/test/clang-tidy/infrastructure/config-files.cpp @@ -15,12 +15,16 @@ // CHECK-COMMAND-LINE: HeaderFilterRegex: from command line // For this test we have to use names of the real checks because otherwise values are ignored. +// Running with the old key: <Key>, value: <Value> CheckOptions // RUN: clang-tidy -dump-config %S/Inputs/config-files/4/44/- -- | FileCheck %s -check-prefix=CHECK-CHILD4 +// Running with the new <Key>: <Value> syntax +// RUN: clang-tidy -dump-config %S/Inputs/config-files/4/key-dict/- -- | FileCheck %s -check-prefix=CHECK-CHILD4 + // CHECK-CHILD4: Checks: {{.*}}modernize-loop-convert,modernize-use-using,llvm-qualified-auto -// CHECK-CHILD4-DAG: - key: llvm-qualified-auto.AddConstToQualified{{ *[[:space:]] *}}value: 'true' -// CHECK-CHILD4-DAG: - key: modernize-loop-convert.MaxCopySize{{ *[[:space:]] *}}value: '20' -// CHECK-CHILD4-DAG: - key: modernize-loop-convert.MinConfidence{{ *[[:space:]] *}}value: reasonable -// CHECK-CHILD4-DAG: - key: modernize-use-using.IgnoreMacros{{ *[[:space:]] *}}value: 'false' +// CHECK-CHILD4-DAG: llvm-qualified-auto.AddConstToQualified: 'true' +// CHECK-CHILD4-DAG: modernize-loop-convert.MaxCopySize: '20' +// CHECK-CHILD4-DAG: modernize-loop-convert.MinConfidence: reasonable +// CHECK-CHILD4-DAG: modernize-use-using.IgnoreMacros: 'false' // RUN: clang-tidy --explain-config %S/Inputs/config-files/4/44/- -- | FileCheck %s -check-prefix=CHECK-EXPLAIN // CHECK-EXPLAIN: 'llvm-qualified-auto' is enabled in the {{.*}}{{[/\\]}}Inputs{{[/\\]}}config-files{{[/\\]}}4{{[/\\]}}44{{[/\\]}}.clang-tidy.
@@ -32,14 +36,21 @@ // RUN: Checks: -llvm-qualified-auto, \ // RUN: CheckOptions: [{key: modernize-loop-convert.MaxCopySize, value: 21}]}' \ // RUN: %S/Inputs/config-files/4/44/- -- | FileCheck %s -check-prefix=CHECK-CHILD5 +// Also test with the {Key: Value} syntax specified on the command line +// RUN: clang-tidy -dump-config \ +// RUN: --config='{InheritParentConfig: true, \ +// RUN: Checks: -llvm-qualified-auto, \ +// RUN: CheckOptions: {modernize-loop-convert.MaxCopySize: 21}}' \ +// RUN: %S/Inputs/config-files/4/44/- -- | FileCheck %s -check-prefix=CHECK-CHILD5 + // CHECK-CHILD5: Checks: {{.*}}modernize-loop-convert,modernize-use-using,llvm-qualified-auto,-llvm-qualified-auto -// CHECK-CHILD5-DAG: - key: modernize-loop-convert.MaxCopySize{{ *[[:space:]] *}}value: '21' -// CHECK-CHILD5-DAG: - key: modernize-loop-convert.MinConfidence{{ *[[:space:]] *}}value: reasonable -// CHECK-CHILD5-DAG: - key: modernize-use-using.IgnoreMacros{{ *[[:space:]] *}}value: 'false' +// CHECK-CHILD5-DAG: modernize-loop-convert.MaxCopySize: '21' +// CHECK-CHILD5-DAG: modernize-loop-convert.MinConfidence: reasonable +// CHECK-CHILD5-DAG: modernize-use-using.IgnoreMacros: 'false' // RUN: clang-tidy -dump-config \ // RUN: --config='{InheritParentConfig: false, \ // RUN: Checks: -llvm-qualified-auto}' \ // RUN: %S/Inputs/config-files/4/44/- -- | FileCheck %s -check-prefix=CHECK-CHILD6 // CHECK-CHILD6: Checks: {{.*-llvm-qualified-auto'? *$}} -// CHECK-CHILD6-NOT: - key: modernize-use-using.IgnoreMacros +// CHECK-CHILD6-NOT: modernize-use-using.IgnoreMacros diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp new file mode 100644 index 0000000000000000000000000000000000000000..edd6a9ee362dfc1331affea0518f54a77413fb2c --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp @@ -0,0 +1,17 @@ +// RUN: clang-tidy -verify-config --config='' | FileCheck %s -check-prefix=CHECK-VERIFY-OK +// CHECK-VERIFY-OK: No config errors detected.
+ +// RUN: not clang-tidy -verify-config \ +// RUN: --checks='-*,bad*glob,llvm*,llvm-includeorder,my-made-up-check' --config='{Checks: "readability-else-after-ret", \ +// RUN: CheckOptions: [{key: "IgnoreMacros", value: "true"}, \ +// RUN: {key: "StriceMode", value: "true"}, \ +// RUN: {key: modernize-lop-convert.UseCxx20ReverseRanges, value: true} \ +// RUN: ]}' 2>&1 | FileCheck %s \ +// RUN: -check-prefix=CHECK-VERIFY -implicit-check-not='{{warning|error}}:' + +// CHECK-VERIFY-DAG: command-line option '-config': warning: unknown check 'readability-else-after-ret'; did you mean 'readability-else-after-return' [-verify-config] +// CHECK-VERIFY-DAG: command-line option '-config': warning: unknown check option 'modernize-lop-convert.UseCxx20ReverseRanges'; did you mean 'modernize-loop-convert.UseCxx20ReverseRanges' [-verify-config] +// CHECK-VERIFY-DAG: command-line option '-config': warning: unknown check option 'StriceMode'; did you mean 'StrictMode' [-verify-config] +// CHECK-VERIFY: command-line option '-checks': warning: check glob 'bad*glob' doesn't match any known check [-verify-config] +// CHECK-VERIFY: command-line option '-checks': warning: unknown check 'llvm-includeorder'; did you mean 'llvm-include-order' [-verify-config] +// CHECK-VERIFY: command-line option '-checks': warning: unknown check 'my-made-up-check' [-verify-config] diff --git a/clang-tools-extra/unittests/clang-doc/ClangDocTest.cpp b/clang-tools-extra/unittests/clang-doc/ClangDocTest.cpp index 970ffcebd717b23603e8f8c8f5bc49de800fee39..b8f50bf34bed42acb88fee3bb75dd9b69f769323 100644 --- a/clang-tools-extra/unittests/clang-doc/ClangDocTest.cpp +++ b/clang-tools-extra/unittests/clang-doc/ClangDocTest.cpp @@ -96,7 +96,7 @@ void CheckBaseInfo(Info *Expected, Info *Actual) { void CheckSymbolInfo(SymbolInfo *Expected, SymbolInfo *Actual) { CheckBaseInfo(Expected, Actual); EXPECT_EQ(Expected->DefLoc.hasValue(), Actual->DefLoc.hasValue()); - if (Expected->DefLoc.hasValue() && Actual->DefLoc.hasValue()) { + if (Expected->DefLoc && Actual->DefLoc.hasValue()) { EXPECT_EQ(Expected->DefLoc->LineNumber, Actual->DefLoc->LineNumber); EXPECT_EQ(Expected->DefLoc->Filename, Actual->DefLoc->Filename); } diff --git a/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp b/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp index 41621aad7d4f0d1488b6b0bd848ab2aebf99281a..3558b5f346a239fff17a39633134f036a86bd82e 100644 --- a/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp +++ b/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp @@ -292,7 +292,7 @@ TEST(CheckOptionsValidation, MissingOptions) { &DiagConsumer, false); Context.setDiagnosticsEngine(&DE); TestCheck TestCheck(&Context); - EXPECT_FALSE(TestCheck.getLocal("Opt").hasValue()); + EXPECT_FALSE(TestCheck.getLocal("Opt")); EXPECT_EQ(TestCheck.getLocal("Opt", "Unknown"), "Unknown"); // Missing options aren't errors. 
EXPECT_TRUE(DiagConsumer.take().empty()); @@ -336,7 +336,7 @@ TEST(CheckOptionsValidation, ValidIntOptions) { CHECK_VAL(TestCheck.getIntLocal("BoolFalseValue"), false); CHECK_VAL(TestCheck.getIntLocal("BoolTrueShort"), true); CHECK_VAL(TestCheck.getIntLocal("BoolFalseShort"), false); - EXPECT_FALSE(TestCheck.getIntLocal("BoolUnparseable").hasValue()); + EXPECT_FALSE(TestCheck.getIntLocal("BoolUnparseable")); EXPECT_THAT( DiagConsumer.take(), diff --git a/clang-tools-extra/unittests/clang-tidy/LLVMModuleTest.cpp b/clang-tools-extra/unittests/clang-tidy/LLVMModuleTest.cpp index ae9018faf6ae217e23a89b7b93dbf3adfdbefa54..93cffadb8279ec65bc04fcb972fadcdc0baaf718 100644 --- a/clang-tools-extra/unittests/clang-tidy/LLVMModuleTest.cpp +++ b/clang-tools-extra/unittests/clang-tidy/LLVMModuleTest.cpp @@ -21,7 +21,7 @@ static std::string runCheck(StringRef Code, const Twine &Filename, std::string Result = test::runCheckOnCode<LLVMHeaderGuardCheck>( Code, &Errors, Filename, std::string("-xc++-header"), ClangTidyOptions{}, std::move(PathsToContent)); - if (Errors.size() != (size_t)ExpectedWarning.hasValue()) + if (Errors.size() != (size_t)ExpectedWarning.has_value()) return "invalid error count"; if (ExpectedWarning && *ExpectedWarning != Errors.back().Message.Message) return "expected: '" + ExpectedWarning->str() + "', saw: '" + diff --git a/clang-tools-extra/unittests/clang-tidy/NamespaceAliaserTest.cpp b/clang-tools-extra/unittests/clang-tidy/NamespaceAliaserTest.cpp index e4cd74ede7e4a0a2b3a149db3712c4022c35bde5..aa51231d363da6d08ce53c5f83f036ce78903269 100644 --- a/clang-tools-extra/unittests/clang-tidy/NamespaceAliaserTest.cpp +++ b/clang-tools-extra/unittests/clang-tidy/NamespaceAliaserTest.cpp @@ -34,7 +34,7 @@ public: assert(Call != nullptr && "Did not find node \"foo\""); auto Hint = Aliaser->createAlias(*Result.Context, *Call, "::foo::bar", {"b", "some_alias"}); - if (Hint.hasValue()) + if (Hint) diag(Call->getBeginLoc(), "Fix for testing") << Hint.getValue(); diag(Call->getBeginLoc(), "insert call") << FixItHint::CreateInsertion( diff --git a/clang-tools-extra/unittests/clang-tidy/UsingInserterTest.cpp b/clang-tools-extra/unittests/clang-tidy/UsingInserterTest.cpp index 71c71596d0d908bf67b8552e4823a92b5a6bbbda..edd644c452d33863bad7c5890e40d47854d8d54b 100644 --- a/clang-tools-extra/unittests/clang-tidy/UsingInserterTest.cpp +++ b/clang-tools-extra/unittests/clang-tidy/UsingInserterTest.cpp @@ -37,7 +37,7 @@ public: auto Hint = Inserter->createUsingDeclaration(*Result.Context, *Call, "::foo::func"); - if (Hint.hasValue()) + if (Hint) diag(Call->getBeginLoc(), "Fix for testing") << Hint.getValue(); diag(Call->getBeginLoc(), "insert call") diff --git a/clang/docs/ClangCommandLineReference.rst b/clang/docs/ClangCommandLineReference.rst index 7e53f45c59689f44f60681ee6494459690e24aba..17892e34617073d8cbc1df1daec0c512ca324d6d 100644 --- a/clang/docs/ClangCommandLineReference.rst +++ b/clang/docs/ClangCommandLineReference.rst @@ -2639,6 +2639,12 @@ Enable unstable and experimental features .. option:: -fuse-init-array, -fno-use-init-array +.. option:: -fstrict-flex-arrays=<arg>, -fno-strict-flex-arrays + +Control which arrays are considered as flexible array members. <arg> can be 1 (array of size 0, 1 and undefined are considered), 2 (array of size 0 +and undefined are considered) or 3 (only arrays of undefined size are considered). + .. option:: -fuse-ld=<arg> ..
option:: -fuse-line-directives, -fno-use-line-directives diff --git a/clang/docs/ClangFormat.rst b/clang/docs/ClangFormat.rst index 745c66efa9e0e1f7a2269789e9ea2f0d64eeb5c2..16b316cdf0667730179e244832df5eb2185660ba 100644 --- a/clang/docs/ClangFormat.rst +++ b/clang/docs/ClangFormat.rst @@ -43,6 +43,17 @@ to format C/C++/Java/JavaScript/JSON/Objective-C/Protobuf/C# code. --assume-filename=<string> - Override filename used to determine the language. When reading from stdin, clang-format assumes this filename to determine the language. + Unrecognized filenames are treated as C++. + supported: + CSharp: .cs + Java: .java + JavaScript: .mjs .js .ts + Json: .json + Objective-C: .m .mm + Proto: .proto .protodevel + TableGen: .td + TextProto: .textpb .pb.txt .textproto .asciipb + Verilog: .sv .svh .v .vh --cursor=<uint> - The position of the cursor when invoking clang-format from an editor integration --dry-run - If set, do not actually make the formatting changes diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst index a69b798f31ee4f4d5c9bbe51b4398ab08ded932d..af697fafd8c41805f47983fc368447ebc02155b7 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -3813,6 +3813,44 @@ it causes the instantiation of ``twice`` and ``thrice`` with an ``int`` type; of these two instantiations, ``twice`` will be optimized (because its definition was outside the region) and ``thrice`` will not be optimized. +Clang also implements MSVC's range-based pragma, +``#pragma optimize("[optimization-list]", on | off)``. At the moment, Clang only +supports an empty optimization list, whereas MSVC supports the arguments ``s``, +``g``, ``t``, and ``y``. Currently, the implementation of ``pragma optimize`` behaves +the same as ``#pragma clang optimize``. All functions +between ``off`` and ``on`` will be decorated with the ``optnone`` attribute. + +.. code-block:: c++ + + #pragma optimize("", off) + // This function will be decorated with optnone. + void f1() {} + + #pragma optimize("", on) + // This function will be optimized with whatever was specified on + // the commandline. + void f2() {} + + // This will warn with Clang's current implementation. + #pragma optimize("g", on) + void f3() {} + +For MSVC, an empty optimization list and ``off`` parameter will turn off +all optimizations, ``s``, ``g``, ``t``, and ``y``. An empty optimization list and +``on`` parameter will reset the optimizations to the ones specified on the +commandline. + +.. list-table:: Parameters (unsupported by Clang) + + * - Parameter + - Type of optimization + * - g + - Deprecated + * - s or t + - Short or fast sequences of machine code + * - y + - Enable frame pointers + Extensions for loop hint optimizations ====================================== diff --git a/clang/docs/LibASTImporter.rst b/clang/docs/LibASTImporter.rst index bedaf527f5e9e90e295b880522377de70665739d..515eff7ebe330a48598b3c5c903f57640ff95d60 100644 --- a/clang/docs/LibASTImporter.rst +++ b/clang/docs/LibASTImporter.rst @@ -468,7 +468,7 @@ Note, there may be several different ASTImporter objects which import into the s cxxRecordDecl(hasName("Y"), isDefinition()), ToUnit); ToYDef->dump(); // An error is set for "ToYDef" in the shared state.
- Optional<ImportError> OptErr = + Optional<ASTImportError> OptErr = ImporterState->getImportDeclErrorIfAny(ToYDef); assert(OptErr); diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 6b4616d43bc2961c5497974cd9243a7d7f1f0314..c884f745b8fc9982c557edfc0896b9c6ca4edd3d 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -68,6 +68,11 @@ Major New Features Randomizing structure layout is a C-only feature. +- Clang now supports the ``-fstrict-flex-arrays=<arg>`` option to control which + array bounds lead to flexible array members. The option yields more accurate + ``__builtin_object_size`` and ``__builtin_dynamic_object_size`` results in + most cases but may be overly conservative for some legacy code. + Bug Fixes --------- - ``CXXNewExpr::getArraySize()`` previously returned a ``llvm::Optional`` @@ -331,6 +336,10 @@ New Pragmas in Clang - Added support for MSVC's ``#pragma alloc_text``. The pragma names the code section functions are placed in. The pragma only applies to functions with C linkage. +- Added support for an empty optimization list for MSVC's ``#pragma optimize``. + The pragma takes a list of optimizations to turn on or off which applies to + all functions following the pragma. At the moment, only an empty list is + supported. - ... diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h index 15e795e463ab3fd1c5b432f620aee443727719b7..94d0145db476e4a3a79c86e654eb7c1cdfb7df64 100644 --- a/clang/include/clang-c/Index.h +++ b/clang/include/clang-c/Index.h @@ -2625,12 +2625,16 @@ enum CXCursorKind { /** OpenMP target parallel loop directive. */ CXCursor_OMPTargetParallelGenericLoopDirective = 299, - + /** OpenMP parallel masked directive. */ CXCursor_OMPParallelMaskedDirective = 300, + + /** OpenMP masked taskloop directive. + */ + CXCursor_OMPMaskedTaskLoopDirective = 301, - CXCursor_LastStmt = CXCursor_OMPParallelMaskedDirective, + CXCursor_LastStmt = CXCursor_OMPMaskedTaskLoopDirective, /** * Cursor that represents the translation unit itself. diff --git a/clang/include/clang/APINotes/Types.h b/clang/include/clang/APINotes/Types.h index ed5250f3d5b4e5c5703a69ae95a0975f0a78539e..d5372902ee095066c4655af4e0878e7cba1d399a 100644 --- a/clang/include/clang/APINotes/Types.h +++ b/clang/include/clang/APINotes/Types.h @@ -77,7 +77,7 @@ public: void setSwiftPrivate(llvm::Optional<bool> Private) { SwiftPrivateSpecified = Private.hasValue(); - SwiftPrivate = Private.hasValue() ? *Private : 0; + SwiftPrivate = Private ? *Private : 0; } friend bool operator==(const CommonEntityInfo &, const CommonEntityInfo &); diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h index 574d2fad216ea6324a868ef7b2c8c658637a1f8f..392c5bcb1e20ad4321640d5608953b6792ac0cb6 100644 --- a/clang/include/clang/AST/Expr.h +++ b/clang/include/clang/AST/Expr.h @@ -443,6 +443,16 @@ public: return (OK == OK_Ordinary || OK == OK_BitField); } + /// True when this expression refers to a flexible array member in a + /// struct. \c StrictFlexArraysLevel controls which array bounds are + /// acceptable for such arrays: + /// + /// - 0 => any array bound, + /// - 1 => [0], [1], [ ] + /// - 2 => [0], [ ] + /// - 3 => [ ] + bool isFlexibleArrayMember(ASTContext &Ctx, int StrictFlexArraysLevel) const; +
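The level semantics are easiest to see on concrete declarations. A minimal sketch of how the tiers enumerated above classify trailing arrays (the zero- and one-element forms rely on Clang/GCC extensions; the struct names are illustrative):

```cpp
// Which trailing arrays count as flexible array members at each level:
struct S0 { int n; int tail[]; };  // levels 0, 1, 2, 3: always flexible
struct S1 { int n; int tail[0]; }; // levels 0, 1, 2: flexible; level 3: not
struct S2 { int n; int tail[1]; }; // levels 0, 1: flexible; levels 2, 3: not
struct S3 { int n; int tail[7]; }; // level 0 only: any trailing bound counts
```

Raising the level therefore shrinks the set of arrays treated as unbounded, which is what tightens `__builtin_object_size` results for code that used the `[0]`/`[1]` idioms as real fixed-size arrays.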
/// setValueKind - Set the value kind produced by this expression. void setValueKind(ExprValueKind Cat) { ExprBits.ValueKind = Cat; } diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h index e5acaec7358e1eaa74f1fe42293ea5bac686efaf..70db0b8e8310f06424fd9965f50217356c4190c9 100644 --- a/clang/include/clang/AST/RecursiveASTVisitor.h +++ b/clang/include/clang/AST/RecursiveASTVisitor.h @@ -3078,6 +3078,9 @@ DEF_TRAVERSE_STMT(OMPParallelMasterTaskLoopDirective, DEF_TRAVERSE_STMT(OMPParallelMasterTaskLoopSimdDirective, { TRY_TO(TraverseOMPExecutableDirective(S)); }) +DEF_TRAVERSE_STMT(OMPMaskedTaskLoopDirective, + { TRY_TO(TraverseOMPExecutableDirective(S)); }) + DEF_TRAVERSE_STMT(OMPDistributeDirective, { TRY_TO(TraverseOMPExecutableDirective(S)); }) diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h index 0482e6a3b7d3d3bc1731e6403c6207447c1e398b..ef66392c5455b730a19434660daf6172478d536e 100644 --- a/clang/include/clang/AST/StmtOpenMP.h +++ b/clang/include/clang/AST/StmtOpenMP.h @@ -1523,6 +1523,7 @@ public: T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || + T->getStmtClass() == OMPMaskedTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPGenericLoopDirectiveClass || @@ -3858,6 +3859,82 @@ public: } }; +/// This represents '#pragma omp masked taskloop' directive. +/// +/// \code +/// #pragma omp masked taskloop private(a,b) grainsize(val) num_tasks(num) +/// \endcode +/// In this example directive '#pragma omp masked taskloop' has clauses +/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' +/// and 'num_tasks' with expression 'num'. +/// +class OMPMaskedTaskLoopDirective final : public OMPLoopDirective { + friend class ASTStmtReader; + friend class OMPExecutableDirective; + /// true if the construct has inner cancel directive. + bool HasCancel = false; + + /// Build directive with the given start and end location. + /// + /// \param StartLoc Starting location of the directive kind. + /// \param EndLoc Ending location of the directive. + /// \param CollapsedNum Number of collapsed nested loops. + /// + OMPMaskedTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum) + : OMPLoopDirective(OMPMaskedTaskLoopDirectiveClass, + llvm::omp::OMPD_masked_taskloop, StartLoc, EndLoc, + CollapsedNum) {} + + /// Build an empty directive. + /// + /// \param CollapsedNum Number of collapsed nested loops. + /// + explicit OMPMaskedTaskLoopDirective(unsigned CollapsedNum) + : OMPLoopDirective(OMPMaskedTaskLoopDirectiveClass, + llvm::omp::OMPD_masked_taskloop, SourceLocation(), + SourceLocation(), CollapsedNum) {} + + /// Set cancel state. + void setHasCancel(bool Has) { HasCancel = Has; } + +public: + /// Creates directive with a list of \a Clauses. + /// + /// \param C AST context. + /// \param StartLoc Starting location of the directive kind. + /// \param EndLoc Ending location of the directive. + /// \param CollapsedNum Number of collapsed loops. + /// \param Clauses List of clauses. + /// \param AssociatedStmt Statement, associated with the directive. + /// \param Exprs Helper expressions for CodeGen. + /// \param HasCancel true if this directive has inner cancel directive.
+ /// + static OMPMaskedTaskLoopDirective * + Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); + + /// Creates an empty directive with the place + /// for \a NumClauses clauses. + /// + /// \param C AST context. + /// \param CollapsedNum Number of collapsed nested loops. + /// \param NumClauses Number of clauses. + /// + static OMPMaskedTaskLoopDirective *CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, + EmptyShell); + + /// Return true if current directive has inner cancel directive. + bool hasCancel() const { return HasCancel; } + + static bool classof(const Stmt *T) { + return T->getStmtClass() == OMPMaskedTaskLoopDirectiveClass; + } +}; + /// This represents '#pragma omp master taskloop simd' directive. /// /// \code diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h index e2820fcb556557e4f17eed176f7d0123f504336c..c1100d8474aa4707da6dafee2ece96e7195f9808 100644 --- a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h +++ b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h @@ -17,6 +17,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" +#include "clang/AST/TypeOrdering.h" #include "clang/Analysis/FlowSensitive/Solver.h" #include "clang/Analysis/FlowSensitive/StorageLocation.h" #include "clang/Analysis/FlowSensitive/Value.h" @@ -44,6 +45,9 @@ namespace dataflow { const Expr &ignoreCFGOmittedNodes(const Expr &E); const Stmt &ignoreCFGOmittedNodes(const Stmt &S); +/// Returns the set of all fields in the type. +llvm::DenseSet<const FieldDecl *> getObjectFields(QualType Type); + /// Owns objects that encompass the state of a program and stores context that /// is used during dataflow analysis. class DataflowAnalysisContext { @@ -85,6 +89,19 @@ public: return *cast<T>(Vals.back().get()); } + /// Returns a stable storage location appropriate for `Type`. + /// + /// Requirements: + /// + /// `Type` must not be null. + StorageLocation &getStableStorageLocation(QualType Type); + + /// Returns a stable storage location for `D`. + StorageLocation &getStableStorageLocation(const VarDecl &D); + + /// Returns a stable storage location for `E`. + StorageLocation &getStableStorageLocation(const Expr &E); + /// Assigns `Loc` as the storage location of `D`. /// /// Requirements: @@ -136,6 +153,10 @@ public: return ThisPointeeLoc; } + /// Returns a pointer value that represents a null pointer. Calls with + /// `PointeeType` that are canonically equivalent will return the same result. + PointerValue &getOrCreateNullPointerValue(QualType PointeeType); + /// Returns a symbolic boolean value that models a boolean literal equal to /// `Value`. AtomicBoolValue &getBoolLiteralValue(bool Value) const { @@ -151,17 +172,29 @@ public: /// `RHS`. Subsequent calls with the same arguments, regardless of their /// order, will return the same result. If the given boolean values represent /// the same value, the result will be the value itself. - BoolValue &getOrCreateConjunctionValue(BoolValue &LHS, BoolValue &RHS); + BoolValue &getOrCreateConjunction(BoolValue &LHS, BoolValue &RHS); /// Returns a boolean value that represents the disjunction of `LHS` and /// `RHS`. Subsequent calls with the same arguments, regardless of their /// order, will return the same result.
If the given boolean values represent /// the same value, the result will be the value itself. - BoolValue &getOrCreateDisjunctionValue(BoolValue &LHS, BoolValue &RHS); + BoolValue &getOrCreateDisjunction(BoolValue &LHS, BoolValue &RHS); /// Returns a boolean value that represents the negation of `Val`. Subsequent /// calls with the same argument will return the same result. - BoolValue &getOrCreateNegationValue(BoolValue &Val); + BoolValue &getOrCreateNegation(BoolValue &Val); + + /// Returns a boolean value that represents `LHS => RHS`. Subsequent calls + /// with the same arguments will return the same result. If the given boolean + /// values represent the same value, the result will be a value that + /// represents the true boolean literal. + BoolValue &getOrCreateImplication(BoolValue &LHS, BoolValue &RHS); + + /// Returns a boolean value that represents `LHS <=> RHS`. Subsequent calls + /// with the same arguments, regardless of their order, will return the same + /// result. If the given boolean values represent the same value, the result + /// will be a value that represents the true boolean literal. + BoolValue &getOrCreateIff(BoolValue &LHS, BoolValue &RHS); /// Creates a fresh flow condition and returns a token that identifies it. The /// token can be used to perform various operations on the flow condition such @@ -183,6 +216,27 @@ public: AtomicBoolValue &joinFlowConditions(AtomicBoolValue &FirstToken, AtomicBoolValue &SecondToken); + // FIXME: This function returns the flow condition expressed directly as its + // constraints: (C1 AND C2 AND ...). This differs from the general approach in + // the framework where a flow condition is represented as a token (an atomic + // boolean) with dependencies and constraints tracked in `FlowConditionDeps` + // and `FlowConditionConstraints`: (FC <=> C1 AND C2 AND ...). + // Consider whether we should make the representation of flow conditions + // consistent, returning an atomic boolean token with separate constraints + // instead. + // + /// Builds and returns the logical formula defining the flow condition + /// identified by `Token`. If a value in the formula is present as a key in + /// `Substitutions`, it will be substituted with the value it maps to. + /// As an example, say we have flow condition tokens FC1, FC2, FC3 and + /// FlowConditionConstraints: { FC1: C1, + /// FC2: C2, + /// FC3: (FC1 v FC2) ^ C3 } + /// buildAndSubstituteFlowCondition(FC3, {{C1 -> C1'}}) will return a value + /// corresponding to (C1' v C2) ^ C3. + BoolValue &buildAndSubstituteFlowCondition( + AtomicBoolValue &Token, + llvm::DenseMap Substitutions); + /// Returns true if and only if the constraints of the flow condition /// identified by `Token` imply that `Val` is true. bool flowConditionImplies(AtomicBoolValue &Token, BoolValue &Val); @@ -191,6 +245,11 @@ public: /// identified by `Token` are always true. bool flowConditionIsTautology(AtomicBoolValue &Token); + /// Returns true if `Val1` is equivalent to `Val2`. + /// Note: This function doesn't take into account constraints on `Val1` and + /// `Val2` imposed by the flow condition. + bool equivalentBoolValues(BoolValue &Val1, BoolValue &Val2); + private: /// Adds all constraints of the flow condition identified by `Token` and all /// of its transitive dependencies to `Constraints`. `VisitedTokens` is used /// to prevent duplicate processing of tokens already visited in previous @@ -198,7 +257,37 @@ private: /// calls.
void addTransitiveFlowConditionConstraints( AtomicBoolValue &Token, llvm::DenseSet &Constraints, - llvm::DenseSet &VisitedTokens) const; + llvm::DenseSet &VisitedTokens); + + /// Returns the result of satisfiability checking on `Constraints`. + /// Possible return values are: + /// - `Satisfiable`: There exists a satisfying assignment for `Constraints`. + /// - `Unsatisfiable`: There is no satisfying assignment for `Constraints`. + /// - `TimedOut`: The solver gives up on finding a satisfying assignment. + Solver::Result querySolver(llvm::DenseSet Constraints); + + /// Returns true if the solver is able to prove that there is no satisfying + /// assignment for `Constraints`. + bool isUnsatisfiable(llvm::DenseSet Constraints) { + return querySolver(std::move(Constraints)) == Solver::Result::Unsatisfiable; + } + + /// Returns a boolean value as a result of substituting `Val` and its sub + /// values based on entries in `SubstitutionsCache`. Intermediate results are + /// stored in `SubstitutionsCache` to avoid reprocessing values that have + /// already been visited. + BoolValue &substituteBoolValue( + BoolValue &Val, + llvm::DenseMap &SubstitutionsCache); + + /// Builds and returns the logical formula defining the flow condition + /// identified by `Token`; sub values may be substituted based on entries in + /// `SubstitutionsCache`. Intermediate results are stored in + /// `SubstitutionsCache` to avoid reprocessing values that have already been + /// visited. + BoolValue &buildAndSubstituteFlowConditionWithCache( + AtomicBoolValue &Token, + llvm::DenseMap &SubstitutionsCache); std::unique_ptr S; @@ -216,6 +305,14 @@ private: StorageLocation *ThisPointeeLoc = nullptr; + // Null pointer values, keyed by the canonical pointee type. + // + // FIXME: The pointer values are indexed by the pointee types which are + // required to initialize the `PointeeLoc` field in `PointerValue`. Consider + // creating a type-independent `NullPointerValue` without a `PointeeLoc` + // field. + llvm::DenseMap NullPointerVals; + AtomicBoolValue &TrueVal; AtomicBoolValue &FalseVal; @@ -232,21 +329,16 @@ private: // defines the flow condition. Conceptually, each binding corresponds to an // "iff" of the form `FC <=> (C1 ^ C2 ^ ...)` where `FC` is a flow condition // token (an atomic boolean) and `Ci`s are the set of constraints in the flow - // flow condition clause. Internally, we do not record the formula directly as - // an "iff". Instead, a flow condition clause is encoded as conjuncts of the - // form `(FC v !C1 v !C2 v ...) ^ (C1 v !FC) ^ (C2 v !FC) ^ ...`. The first - // conjuct is stored in the `FlowConditionFirstConjuncts` map and the set of - // remaining conjuncts are stored in the `FlowConditionRemainingConjuncts` - // map, both keyed by the token of the flow condition. + // condition clause. The set of constraints (C1 ^ C2 ^ ...) is stored in + // the `FlowConditionConstraints` map, keyed by the token of the flow + // condition. // // Flow conditions depend on other flow conditions if they are created using // `forkFlowCondition` or `joinFlowConditions`. The graph of flow condition // dependencies is stored in the `FlowConditionDeps` map.
llvm::DenseMap> FlowConditionDeps; - llvm::DenseMap FlowConditionFirstConjuncts; - llvm::DenseMap> - FlowConditionRemainingConjuncts; + llvm::DenseMap FlowConditionConstraints; }; } // namespace dataflow diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h index 0a2c75f804c2aa78c7e76b4515463f638d707c21..302e35d337e642205cb713b68288e4cb76e2c46c 100644 --- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h +++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h @@ -203,6 +203,10 @@ public: /// in the environment. StorageLocation *getThisPointeeStorageLocation() const; + /// Returns a pointer value that represents a null pointer. Calls with + /// `PointeeType` that are canonically equivalent will return the same result. + PointerValue &getOrCreateNullPointerValue(QualType PointeeType); + /// Creates a value appropriate for `Type`, if `Type` is supported, otherwise /// return null. If `Type` is a pointer or reference type, creates all the /// necessary storage locations and values for indirections until it finds a @@ -268,7 +272,7 @@ public: /// order, will return the same result. If the given boolean values represent /// the same value, the result will be the value itself. BoolValue &makeAnd(BoolValue &LHS, BoolValue &RHS) const { - return DACtx->getOrCreateConjunctionValue(LHS, RHS); + return DACtx->getOrCreateConjunction(LHS, RHS); } /// Returns a boolean value that represents the disjunction of `LHS` and @@ -276,21 +280,21 @@ public: /// order, will return the same result. If the given boolean values represent /// the same value, the result will be the value itself. BoolValue &makeOr(BoolValue &LHS, BoolValue &RHS) const { - return DACtx->getOrCreateDisjunctionValue(LHS, RHS); + return DACtx->getOrCreateDisjunction(LHS, RHS); } /// Returns a boolean value that represents the negation of `Val`. Subsequent /// calls with the same argument will return the same result. BoolValue &makeNot(BoolValue &Val) const { - return DACtx->getOrCreateNegationValue(Val); + return DACtx->getOrCreateNegation(Val); } /// Returns a boolean value represents `LHS` => `RHS`. Subsequent calls with - /// the same arguments, regardless of their order, will return the same - /// result. If the given boolean values represent the same value, the result - /// will be a value that represents the true boolean literal. + /// the same arguments will return the same result. If the given boolean + /// values represent the same value, the result will be a value that + /// represents the true boolean literal. BoolValue &makeImplication(BoolValue &LHS, BoolValue &RHS) const { - return &LHS == &RHS ? getBoolLiteralValue(true) : makeOr(makeNot(LHS), RHS); + return DACtx->getOrCreateImplication(LHS, RHS); } /// Returns a boolean value represents `LHS` <=> `RHS`. Subsequent calls with @@ -298,9 +302,7 @@ public: /// result. If the given boolean values represent the same value, the result /// will be a value that represents the true boolean literal. BoolValue &makeIff(BoolValue &LHS, BoolValue &RHS) const { - return &LHS == &RHS - ? getBoolLiteralValue(true) - : makeAnd(makeImplication(LHS, RHS), makeImplication(RHS, LHS)); + return DACtx->getOrCreateIff(LHS, RHS); } /// Returns the token that identifies the flow condition of the environment.
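The renamed `getOrCreate*` factories are caching constructors: structurally identical queries return the same `BoolValue` object, so later passes can compare formulas by pointer identity. Below is a minimal usage sketch, not taken from the patch itself; it assumes the context is constructed with a `Solver` such as `WatchedLiteralsSolver` and that `createAtomicBoolValue()` is publicly reachable, neither of which is shown in these hunks.

```cpp
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
#include <cassert>
#include <memory>

using namespace clang::dataflow;

void boolAlgebraSketch() {
  DataflowAnalysisContext Ctx(std::make_unique<WatchedLiteralsSolver>());
  BoolValue &A = Ctx.createAtomicBoolValue();
  BoolValue &B = Ctx.createAtomicBoolValue();

  // Conjunctions are cached under a canonical (pointer-ordered) key, so
  // argument order does not matter: both calls yield the same object.
  assert(&Ctx.getOrCreateConjunction(A, B) == &Ctx.getOrCreateConjunction(B, A));

  // Self-implication and self-iff fold directly to the `true` literal.
  assert(&Ctx.getOrCreateImplication(A, A) == &Ctx.getBoolLiteralValue(true));
  assert(&Ctx.getOrCreateIff(A, A) == &Ctx.getBoolLiteralValue(true));
}
```

This sharing is what keeps implication queries cheap to assemble: equal subformulas are reused rather than rebuilt on every call.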
diff --git a/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h b/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h index 2aaaf78f9cd0eed8a202d6f7ad0b25b0f5fe4dca..927aec7df57310d9a81dd36293874cc1d14fee66 100644 --- a/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h +++ b/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h @@ -46,8 +46,8 @@ template struct TransferState { /// Matches against `Stmt` and, based on its structure, dispatches to an /// appropriate handler. -template -using MatchSwitch = std::function; +template +using MatchSwitch = std::function; /// Collects cases of a "match switch": a collection of matchers paired with /// callbacks, which together define a switch that can be applied to a @@ -68,7 +68,7 @@ using MatchSwitch = std::function; /// .Build(); /// } /// \endcode -template class MatchSwitchBuilder { +template class MatchSwitchBuilder { public: /// Registers an action that will be triggered by the match of a pattern /// against the input statement. @@ -79,24 +79,24 @@ public: template MatchSwitchBuilder && CaseOf(ast_matchers::internal::Matcher M, - std::function + std::function A) && { Matchers.push_back(std::move(M)); Actions.push_back( [A = std::move(A)](const Stmt *Stmt, const ast_matchers::MatchFinder::MatchResult &R, - State &S) { A(cast(Stmt), R, S); }); + State &S) { return A(cast(Stmt), R, S); }); return std::move(*this); } - MatchSwitch Build() && { + MatchSwitch Build() && { return [Matcher = BuildMatcher(), Actions = std::move(Actions)]( - const Stmt &Stmt, ASTContext &Context, State &S) { + const Stmt &Stmt, ASTContext &Context, State &S) -> Result { auto Results = ast_matchers::matchDynamic(Matcher, Stmt, Context); if (Results.empty()) - return; + return Result(); // Look through the map for the first binding of the form "TagN..." use // that to select the action. for (const auto &Element : Results[0].getMap()) { @@ -104,12 +104,12 @@ public: size_t Index = 0; if (ID.consume_front("Tag") && !ID.getAsInteger(10, Index) && Index < Actions.size()) { - Actions[Index]( + return Actions[Index]( &Stmt, ast_matchers::MatchFinder::MatchResult(Results[0], &Context), S); - return; } } + return Result(); }; } @@ -142,7 +142,7 @@ private: } std::vector Matchers; - std::vector> Actions; }; diff --git a/clang/include/clang/Analysis/PathDiagnostic.h b/clang/include/clang/Analysis/PathDiagnostic.h index 47cb549c8e6694f2345f7490955b12efe1c06c66..9877f1e3d01fda5e25825ec6b1e2795012147651 100644 --- a/clang/include/clang/Analysis/PathDiagnostic.h +++ b/clang/include/clang/Analysis/PathDiagnostic.h @@ -544,8 +544,8 @@ public: /// flag may have been previously set, at which point it will not /// be reset unless one specifies to do so. void setPrunable(bool isPrunable, bool override = false) { - if (IsPrunable.hasValue() && !override) - return; + if (IsPrunable && !override) + return; IsPrunable = isPrunable; } diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def index 17955c365aba4b386374ae2e39c48c637f890a36..364cde30fcb55d3c902b1d9071a9e1a4958a5084 100644 --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -187,6 +187,7 @@ CODEGENOPT(NoImplicitFloat , 1, 0) ///< Set when -mno-implicit-float is enable CODEGENOPT(NullPointerIsValid , 1, 0) ///< Assume Null pointer deference is defined. 
CODEGENOPT(OpenCLCorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt CODEGENOPT(HIPCorrectlyRoundedDivSqrt, 1, 1) ///< -fno-hip-fp32-correctly-rounded-divide-sqrt +CODEGENOPT(HIPSaveKernelArgName, 1, 0) ///< Set when -fhip-kernel-arg-name is enabled. CODEGENOPT(UniqueInternalLinkageNames, 1, 0) ///< Internal Linkage symbols get unique names. CODEGENOPT(SplitMachineFunctions, 1, 0) ///< Split machine functions using profile information. diff --git a/clang/include/clang/Basic/Diagnostic.h b/clang/include/clang/Basic/Diagnostic.h index 33ad0827c0ca54189266ed72478be7dbb5dda4a6..09857da61d326322b85d1c4f924a8c4bf82bf55f 100644 --- a/clang/include/clang/Basic/Diagnostic.h +++ b/clang/include/clang/Basic/Diagnostic.h @@ -545,6 +545,7 @@ public: DiagnosticsEngine &operator=(const DiagnosticsEngine &) = delete; ~DiagnosticsEngine(); + friend void DiagnosticsTestHelper(DiagnosticsEngine &); LLVM_DUMP_METHOD void dump() const; LLVM_DUMP_METHOD void dump(StringRef DiagName) const; @@ -891,9 +892,9 @@ public: LastDiagLevel = Other.LastDiagLevel; } - /// Reset the state of the diagnostic object to its initial - /// configuration. - void Reset(); + /// Reset the state of the diagnostic object to its initial configuration. + /// \param[in] soft - if true, doesn't reset the diagnostic mappings and state. + void Reset(bool soft = false); //===--------------------------------------------------------------------===// // DiagnosticsEngine classification and reporting interfaces. diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td index 398fb15352fe646812086af419d5d13503963b01..ac86076140c58870b1708518e8b9570385730479 100644 --- a/clang/include/clang/Basic/DiagnosticLexKinds.td +++ b/clang/include/clang/Basic/DiagnosticLexKinds.td @@ -128,7 +128,7 @@ def warn_utf8_symbol_zero_width : Warning< "some environments">, InGroup>; def ext_delimited_escape_sequence : Extension< - "delimited escape sequences are a Clang extension">, + "%select{delimited|named}0 escape sequences are a Clang extension">, InGroup>; def err_delimited_escape_empty : Error< "delimited escape sequence cannot be empty">; @@ -138,6 +138,13 @@ def err_delimited_escape_invalid : Error< "invalid digit '%0' in escape sequence">; def err_hex_escape_no_digits : Error< "\\%0 used with no following hex digits">; +def err_invalid_ucn_name : Error< + "'%0' is not a valid Unicode character name">; +def note_invalid_ucn_name_loose_matching : Note< + "character names in Unicode escape sequences are sensitive to case and whitespace">; +def note_invalid_ucn_name_candidate : Note< + "did you mean %0 ('%2' U+%1)?">; + def warn_ucn_escape_no_digits : Warning< "\\%0 used with no following hex digits; " "treating as '\\' followed by identifier">, InGroup; @@ -145,10 +152,10 @@ def err_ucn_escape_incomplete : Error< "incomplete universal character name">; def warn_delimited_ucn_incomplete : Warning< "incomplete delimited universal character name; " - "treating as '\\' 'u' '{' identifier">, InGroup; + "treating as '\\' '%0' '{' identifier">, InGroup; def warn_delimited_ucn_empty : Warning< "empty delimited universal character name; " - "treating as '\\' 'u' '{' '}'">, InGroup; + "treating as '\\' '%0' '{' '}'">, InGroup; def warn_ucn_escape_incomplete : Warning< "incomplete universal character name; " "treating as '\\' followed by identifier">, InGroup; diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index
b0abe2aa517e784ca8fa84df7b48c32a94c60150..352a050ba5cf105c2d6e246b686d1b529176a8cd 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -1169,10 +1169,6 @@ def warn_pragma_pack_malformed : Warning< def warn_pragma_intrinsic_builtin : Warning< "%0 is not a recognized builtin%select{|; consider including <intrin.h> to access non-builtin intrinsics}1">, InGroup; -// - #pragma optimize -def warn_pragma_optimize : Warning< - "'#pragma optimize' is not supported">, - InGroup; // - #pragma unused def warn_pragma_unused_expected_var : Warning< "expected '#pragma unused' argument to be a variable name">, diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 19c3907f482c81f1e5e99afe1293754080fbb06e..3f2bb54052f063af2f02d5fa3c9c49e844454493 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -425,6 +425,7 @@ LANGOPT(PaddingOnUnsignedFixedPoint, 1, 0, LANGOPT(RegisterStaticDestructors, 1, 1, "Register C++ static destructors") LANGOPT(MatrixTypes, 1, 0, "Enable or disable the builtin matrix type") +LANGOPT(StrictFlexArrays, 2, 0, "Rely on strict definition of flexible arrays") COMPATIBLE_VALUE_LANGOPT(MaxTokens, 32, 0, "Max number of tokens per TU or 0") diff --git a/clang/include/clang/Basic/SourceManager.h b/clang/include/clang/Basic/SourceManager.h index 73e6353109d9219640557017cb8c9e3ee4f9260c..5bdeecb07cd9ed462aee88bf605ba75675188196 100644 --- a/clang/include/clang/Basic/SourceManager.h +++ b/clang/include/clang/Basic/SourceManager.h @@ -1473,24 +1473,35 @@ public: /// Returns whether \p Loc is located in a <built-in> file. bool isWrittenInBuiltinFile(SourceLocation Loc) const { - StringRef Filename(getPresumedLoc(Loc).getFilename()); + PresumedLoc Presumed = getPresumedLoc(Loc); + if (Presumed.isInvalid()) + return false; + StringRef Filename(Presumed.getFilename()); return Filename.equals("<built-in>"); } /// Returns whether \p Loc is located in a <command line> file. bool isWrittenInCommandLineFile(SourceLocation Loc) const { - StringRef Filename(getPresumedLoc(Loc).getFilename()); + PresumedLoc Presumed = getPresumedLoc(Loc); + if (Presumed.isInvalid()) + return false; + StringRef Filename(Presumed.getFilename()); return Filename.equals("<command line>"); } /// Returns whether \p Loc is located in a <scratch space> file. bool isWrittenInScratchSpace(SourceLocation Loc) const { - StringRef Filename(getPresumedLoc(Loc).getFilename()); + PresumedLoc Presumed = getPresumedLoc(Loc); + if (Presumed.isInvalid()) + return false; + StringRef Filename(Presumed.getFilename()); return Filename.equals("<scratch space>"); } /// Returns if a SourceLocation is in a system header.
bool isInSystemHeader(SourceLocation Loc) const { + if (Loc.isInvalid()) + return false; return isSystem(getFileCharacteristic(Loc)); } diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td index 9ba6e6fa50df8ff41d13e0b078b47c8eff8b4474..50ca0cb4030aa2fdc4bd9febe942e93deb4b78d8 100644 --- a/clang/include/clang/Basic/StmtNodes.td +++ b/clang/include/clang/Basic/StmtNodes.td @@ -264,6 +264,7 @@ def OMPMasterTaskLoopDirective : StmtNode; def OMPMasterTaskLoopSimdDirective : StmtNode; def OMPParallelMasterTaskLoopDirective : StmtNode; def OMPParallelMasterTaskLoopSimdDirective : StmtNode; +def OMPMaskedTaskLoopDirective : StmtNode; def OMPDistributeDirective : StmtNode; def OMPDistributeParallelForDirective : StmtNode; def OMPDistributeParallelForSimdDirective : StmtNode; diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td index 933a6c11f3359c6878c33b7dd0cb3938b3fbd5ed..afd53619bbb55b48814a5e8f81fa92f603f0cb05 100644 --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -2196,7 +2196,7 @@ let HasMasked = false, HasVL = false, IRName = "" in { // C/C++ Operand: VecTy, IR Operand: VecTy, Index let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc", MaskedPolicy = NonePolicy, ManualCodegen = [{ { - ID = Intrinsic::experimental_vector_extract; + ID = Intrinsic::vector_extract; IntrinsicTypes = {ResultType, Ops[0]->getType()}; Ops.push_back(ConstantInt::get(Int64Ty, 0)); return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, ""); @@ -2214,7 +2214,7 @@ let HasMasked = false, HasVL = false, IRName = "" in { // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext", MaskedPolicy = NonePolicy, ManualCodegen = [{ - ID = Intrinsic::experimental_vector_insert; + ID = Intrinsic::vector_insert; IntrinsicTypes = {ResultType, Ops[0]->getType()}; Ops.push_back(llvm::UndefValue::get(ResultType)); std::swap(Ops[0], Ops[1]); @@ -2233,7 +2233,7 @@ let HasMasked = false, HasVL = false, IRName = "" in { let Name = "vget_v", MaskedPolicy = NonePolicy, ManualCodegen = [{ { - ID = Intrinsic::experimental_vector_extract; + ID = Intrinsic::vector_extract; auto *VecTy = cast(ResultType); auto *OpVecTy = cast(Ops[0]->getType()); // Mask to only valid indices. 
@@ -2256,7 +2256,7 @@ let HasMasked = false, HasVL = false, IRName = "" in { let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicy = NonePolicy, ManualCodegen = [{ { - ID = Intrinsic::experimental_vector_insert; + ID = Intrinsic::vector_insert; IntrinsicTypes = {ResultType, Ops[2]->getType()}; auto *ResVecTy = cast(ResultType); auto *VecTy = cast(Ops[2]->getType()); diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6e8dde6a4293bdca8c1e982b42de04b08d1b079e..4c5abd3a161d3b055e877ae67965f2cf6557fad9 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -721,14 +721,14 @@ def MV : Flag<["-"], "MV">, Group, Flags<[CC1Option]>, MarshallingInfoFlag, "DependencyOutputFormat::Make">, Normalizer<"makeFlagToValueNormalizer(DependencyOutputFormat::NMake)">; def Mach : Flag<["-"], "Mach">, Group; -def O0 : Flag<["-"], "O0">, Group, Flags<[CC1Option, HelpHidden]>; -def O4 : Flag<["-"], "O4">, Group, Flags<[CC1Option, HelpHidden]>; +def O0 : Flag<["-"], "O0">, Group, Flags<[CC1Option, FC1Option, HelpHidden]>; +def O4 : Flag<["-"], "O4">, Group, Flags<[CC1Option, FC1Option, HelpHidden]>; def ObjCXX : Flag<["-"], "ObjC++">, Flags<[NoXarchOption]>, HelpText<"Treat source input files as Objective-C++ inputs">; def ObjC : Flag<["-"], "ObjC">, Flags<[NoXarchOption]>, HelpText<"Treat source input files as Objective-C inputs">; -def O : Joined<["-"], "O">, Group, Flags<[CC1Option]>; -def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias, AliasArgs<["1"]>; +def O : Joined<["-"], "O">, Group, Flags<[CC1Option,FC1Option]>; +def O_flag : Flag<["-"], "O">, Flags<[CC1Option,FC1Option]>, Alias, AliasArgs<["1"]>; def Ofast : Joined<["-"], "Ofast">, Group, Flags<[CC1Option]>; def P : Flag<["-"], "P">, Flags<[CC1Option,FlangOption,FC1Option]>, Group, HelpText<"Disable linemarker output in -E mode">, @@ -831,7 +831,7 @@ def Xlinker : Separate<["-"], "Xlinker">, Flags<[LinkerInput, RenderAsInput]>, HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">, Group; def Xoffload_linker : JoinedAndSeparate<["-"], "Xoffload-linker">, - HelpText<"Pass <arg> to the offload linkers or the ones idenfied by -<kind>">, + HelpText<"Pass <arg> to the offload linkers or the ones identified by -<kind>">, MetaVarName<"<arg> <kind>">, Group; def Xpreprocessor : Separate<["-"], "Xpreprocessor">, Group, HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">; @@ -1007,6 +1007,12 @@ defm hip_fp32_correctly_rounded_divide_sqrt : BoolFOption<"hip-fp32-correctly-ro BothFlags<[], " that single precision floating-point divide and sqrt used in " "the program source are correctly rounded (HIP device compilation only)">>, ShouldParseIf; +defm hip_kernel_arg_name : BoolFOption<"hip-kernel-arg-name", + CodeGenOpts<"HIPSaveKernelArgName">, DefaultFalse, + PosFlag, + NegFlag, + BothFlags<[], " that kernel argument names are preserved (HIP only)">>, + ShouldParseIf; def hipspv_pass_plugin_EQ : Joined<["--"], "hipspv-pass-plugin=">, Group, MetaVarName<"">, HelpText<"path to a pass plugin for HIP to SPIR-V passes.">; @@ -1134,6 +1140,12 @@ def fallow_unsupported : Flag<["-"], "fallow-unsupported">, Group; def fapple_kext : Flag<["-"], "fapple-kext">, Group, Flags<[CC1Option]>, HelpText<"Use Apple's kernel extensions ABI">, MarshallingInfoFlag>; +def fstrict_flex_arrays_EQ : Joined<["-"], "fstrict-flex-arrays=">,Group, + MetaVarName<"">, Values<"0,1,2,3">, + LangOpts<"StrictFlexArrays">, + Flags<[CC1Option]>, + HelpText<"Enable optimizations based on the strict definition of flexible arrays">, +
MarshallingInfoInt>; defm apple_pragma_pack : BoolFOption<"apple-pragma-pack", LangOpts<"ApplePragmaPack">, DefaultFalse, PosFlag, @@ -5469,10 +5481,6 @@ defm lto_unit : BoolOption<"f", "lto-unit", CodeGenOpts<"LTOUnit">, DefaultFalse, PosFlag, NegFlag>; -defm debug_pass_manager : BoolOption<"f", "debug-pass-manager", - CodeGenOpts<"DebugPassManager">, DefaultFalse, - PosFlag, - NegFlag>; def fverify_debuginfo_preserve : Flag<["-"], "fverify-debuginfo-preserve">, HelpText<"Enable Debug Info Metadata preservation testing in " @@ -6276,6 +6284,10 @@ def load : Separate<["-"], "load">, MetaVarName<"">, HelpText<"Load the named plugin (dynamic shared object)">; def plugin : Separate<["-"], "plugin">, MetaVarName<"">, HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">; +defm debug_pass_manager : BoolOption<"f", "debug-pass-manager", + CodeGenOpts<"DebugPassManager">, DefaultFalse, + PosFlag, + NegFlag>; } // let Flags = [CC1Option, FC1Option, NoDriverOption] @@ -6858,3 +6870,6 @@ def emit_pristine_llvm : DXCFlag<"emit-pristine-llvm">, HelpText<"Emit pristine LLVM IR from the frontend by not running any LLVM passes at all." "Same as -S + -emit-llvm + -disable-llvm-passes.">; def fcgl : DXCFlag<"fcgl">, Alias; +def enable_16bit_types : DXCFlag<"enable-16bit-types">, Alias, + HelpText<"Enable 16-bit types and disable min precision types." + "Available in HLSL 2018 and shader model 6.2.">; diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h index 8249060dc7c04677b355c8a93d02edccca7f3108..f8a4b069b2e75bf4f09258ba2b0a4ed9bbde8203 100644 --- a/clang/include/clang/Format/Format.h +++ b/clang/include/clang/Format/Format.h @@ -2589,12 +2589,17 @@ struct FormatStyle { LK_TableGen, /// Should be used for Protocol Buffer messages in text format /// (https://developers.google.com/protocol-buffers/). - LK_TextProto + LK_TextProto, + /// Should be used for Verilog and SystemVerilog. + /// https://standards.ieee.org/ieee/1800/6700/ + /// https://sci-hub.st/10.1109/IEEESTD.2018.8299595 + LK_Verilog }; bool isCpp() const { return Language == LK_Cpp || Language == LK_ObjC; } bool isCSharp() const { return Language == LK_CSharp; } bool isJson() const { return Language == LK_Json; } bool isJavaScript() const { return Language == LK_JavaScript; } + bool isVerilog() const { return Language == LK_Verilog; } /// Language, this format style is targeted at. /// \version 3.5 @@ -4285,6 +4290,8 @@ inline StringRef getLanguageName(FormatStyle::LanguageKind Language) { return "TableGen"; case FormatStyle::LK_TextProto: return "TextProto"; + case FormatStyle::LK_Verilog: + return "Verilog"; default: return "Unknown"; } diff --git a/clang/include/clang/Interpreter/Interpreter.h b/clang/include/clang/Interpreter/Interpreter.h index f2fdb90f5ba48777d4190319cf767ab37ed0a549..fd22af9766135f96ec1ee0435e388ca8a717e081 100644 --- a/clang/include/clang/Interpreter/Interpreter.h +++ b/clang/include/clang/Interpreter/Interpreter.h @@ -69,6 +69,9 @@ public: return llvm::Error::success(); } + /// Undo N previous incremental inputs. + llvm::Error Undo(unsigned N = 1); + /// \returns the \c JITTargetAddress of a \c GlobalDecl. This interface uses /// the CodeGenModule's internal mangling cache to avoid recomputing the /// mangled name. 
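The new `Undo` entry point declared above rolls back the N most recent incremental inputs. A hedged sketch of driving it, assuming an already-constructed `Interpreter` and the pre-existing `ParseAndExecute` API (which is not part of this hunk):

```cpp
#include "clang/Interpreter/Interpreter.h"
#include "llvm/Support/Error.h"

// Each successful ParseAndExecute() call registers one undoable input.
llvm::Error undoSketch(clang::Interpreter &Interp) {
  if (llvm::Error E = Interp.ParseAndExecute("int x = 42;"))
    return E;
  if (llvm::Error E = Interp.ParseAndExecute("int y = x + 1;"))
    return E;
  // Roll back both inputs, e.g. so `x` and `y` can be redefined cleanly.
  return Interp.Undo(2);
}
```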
diff --git a/clang/include/clang/Lex/Lexer.h b/clang/include/clang/Lex/Lexer.h index d04b332934655566e0fed4eb07fa1859d6ef6de4..748a112b7d57bf45b0576c372e9d1c329613e5b6 100644 --- a/clang/include/clang/Lex/Lexer.h +++ b/clang/include/clang/Lex/Lexer.h @@ -769,6 +769,11 @@ private: void codeCompleteIncludedFile(const char *PathStart, const char *CompletionPoint, bool IsAngled); + llvm::Optional<uint32_t> + tryReadNumericUCN(const char *&StartPtr, const char *SlashLoc, Token *Result); + llvm::Optional<uint32_t> tryReadNamedUCN(const char *&StartPtr, + Token *Result); + /// Read a universal character name. /// /// \param StartPtr The position in the source buffer after the initial '\'. diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 0eb6f7104a55bc1ca28e2505c8c713292608b965..76e1c9db5284e5bfe9a20d2bfc12457bd05c86c6 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -728,6 +728,8 @@ private: SourceLocation PragmaLocation); bool HandlePragmaMSAllocText(StringRef PragmaName, SourceLocation PragmaLocation); + bool HandlePragmaMSOptimize(StringRef PragmaName, + SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 22ebdfe0bfbb487cb365631209b9aca2bed3d84f..8b8b1b2e2e8648528bf3320ac309b2d5f47f479d 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -758,6 +758,13 @@ public: /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; + /// The "on" or "off" argument passed by \#pragma optimize; it denotes + /// whether the optimizations in the list passed to the pragma should be + /// turned off or on. This boolean is true by default because command-line + /// options are honored when `#pragma optimize("", on)` is in effect + /// (i.e. `ModifyFnAttributesMSPragmaOptimize()` does nothing). + bool MSPragmaOptimizeIsOn = true; + /// Set of no-builtin functions listed by \#pragma function.
llvm::SmallSetVector MSFunctionNoBuiltins; @@ -1759,9 +1766,9 @@ public: template friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { - if (Diag.ImmediateDiag.hasValue()) + if (Diag.ImmediateDiag) *Diag.ImmediateDiag << Value; - else if (Diag.PartialDiagId.hasValue()) + else if (Diag.PartialDiagId) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; @@ -1773,26 +1780,26 @@ public: template ::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { - if (ImmediateDiag.hasValue()) + if (ImmediateDiag) *ImmediateDiag << std::move(V); - else if (PartialDiagId.hasValue()) + else if (PartialDiagId) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { - if (Diag.ImmediateDiag.hasValue()) + if (Diag.ImmediateDiag) PD.Emit(*Diag.ImmediateDiag); - else if (Diag.PartialDiagId.hasValue()) + else if (Diag.PartialDiagId) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { - if (ImmediateDiag.hasValue()) + if (ImmediateDiag) ImmediateDiag->AddFixItHint(Hint); - else if (PartialDiagId.hasValue()) + else if (PartialDiagId) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } @@ -10363,6 +10370,9 @@ public: /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); + /// #pragma optimize("[optimization-list]", on | off). + void ActOnPragmaMSOptimize(SourceLocation Loc, bool IsOn); + /// Call on well formed \#pragma function. void ActOnPragmaMSFunction(SourceLocation Loc, @@ -10389,6 +10399,11 @@ public: /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); + /// Only called on function definitions; if there is an MSVC #pragma optimize + /// in scope, consider changing the function's attributes based on the + /// optimization list passed to the pragma. + void ModifyFnAttributesMSPragmaOptimize(FunctionDecl *FD); + /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based no_builtin, consider marking the function /// with attribute no_builtin. @@ -11094,6 +11109,11 @@ public: StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp masked taskloop' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPMaskedTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement.
StmtResult diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index 33b787a83e137c7cadbaf3b02f7bd0dd72b2049d..63b191ef469e00b9da480af67b5199431807b946 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -1949,6 +1949,7 @@ enum StmtCode { STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE, STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE, STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE, + STMT_OMP_MASKED_TASKLOOP_DIRECTIVE, STMT_OMP_DISTRIBUTE_DIRECTIVE, STMT_OMP_TARGET_UPDATE_DIRECTIVE, STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE, diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h index 11c60b68956277107c211393a705390aeb5509e1..4b6cbd516628aab584805d68580250b928dffb33 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h @@ -53,14 +53,10 @@ public: } /// Return true if the constraint is perfectly constrained to 'true'. - bool isConstrainedTrue() const { - return Val.hasValue() && Val.getValue(); - } + bool isConstrainedTrue() const { return Val && Val.getValue(); } /// Return true if the constraint is perfectly constrained to 'false'. - bool isConstrainedFalse() const { - return Val.hasValue() && !Val.getValue(); - } + bool isConstrainedFalse() const { return Val && !Val.getValue(); } /// Return true if the constrained is perfectly constrained. bool isConstrained() const { diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h index 3787f8f01b34eefee8664641a074c62efcb1cc41..415fa05586edf6c183d56dc600709953efdb4126 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h @@ -444,6 +444,10 @@ public: /// other functions that handle specific kinds of statements. void Visit(const Stmt *S, ExplodedNode *Pred, ExplodedNodeSet &Dst); + /// VisitArrayInitLoopExpr - Transfer function for array init loop. + void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex, ExplodedNode *Pred, + ExplodedNodeSet &Dst); + /// VisitArraySubscriptExpr - Transfer function for array accesses. 
void VisitArraySubscriptExpr(const ArraySubscriptExpr *Ex, ExplodedNode *Pred, diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h index 2cb9a6a0a0ed64ce8d3341b720ec26ffbf092d48..34d44f709883d0015449a3d50e3e1d8d262a649f 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h @@ -1364,6 +1364,7 @@ public: ~MemRegionManager(); ASTContext &getContext() { return Ctx; } + const ASTContext &getContext() const { return Ctx; } llvm::BumpPtrAllocator &getAllocator() { return A; } diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h index 250ba4f528968d1ffb46b94ea33afb40a6e9204f..61cab28918dbe2db258f29a65ac69e8de44d9343 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h @@ -341,7 +341,7 @@ protected: addStateConstraints(NewState); Optional res = Solver->check(); - if (!res.hasValue()) + if (!res) Cached[hash] = ConditionTruthVal(); else Cached[hash] = ConditionTruthVal(res.getValue()); diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h index 0e9fe97ab735ef17c88dd0deb70497aa4192be96..3b6f205f9f220d6e65ae15ae42f71654ad113594 100644 --- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h +++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h @@ -209,8 +209,8 @@ public: } bool isValid() const { return Valid; } - bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; } - bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; } + bool isScalar() const { return Scale && Scale.getValue() == 0; } + bool isVector() const { return Scale && Scale.getValue() != 0; } bool isVector(unsigned Width) const { return isVector() && ElementBitwidth == Width; } diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index dfd49f5147ad580ee905b5a01311273f3be14eee..82fb2414758a2d87b423b0042d7f79c04bbcd07c 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -1707,8 +1707,17 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { case BuiltinType::BFloat16: return Target->getBFloat16Format(); case BuiltinType::Float16: - case BuiltinType::Half: return Target->getHalfFormat(); + case BuiltinType::Half: + // For HLSL, when the native half type is disabled, half will be treated as + // float. + if (getLangOpts().HLSL) + if (getLangOpts().NativeHalfType) + return Target->getHalfFormat(); + else + return Target->getFloatFormat(); + else + return Target->getHalfFormat(); case BuiltinType::Float: return Target->getFloatFormat(); case BuiltinType::Double: return Target->getDoubleFormat(); case BuiltinType::Ibm128: @@ -10374,11 +10383,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, // Allow __auto_type to match anything; it merges to the type with more // information.
if (const auto *AT = LHS->getAs()) { - if (AT->isGNUAutoType()) + if (!AT->isDeduced() && AT->isGNUAutoType()) return RHS; } if (const auto *AT = RHS->getAs()) { - if (AT->isGNUAutoType()) + if (!AT->isDeduced() && AT->isGNUAutoType()) return LHS; } return {}; diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp index 7b8acfcd92bea21eb2154cb8b13c1e1cf0bf8590..c1e7435b22dacebc40c892afab152d928cbac3bb 100644 --- a/clang/lib/AST/AttrImpl.cpp +++ b/clang/lib/AST/AttrImpl.cpp @@ -168,7 +168,7 @@ OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) { llvm::Optional OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) { llvm::Optional ActiveAttr = getActiveAttr(VD); - if (ActiveAttr.hasValue()) + if (ActiveAttr) return ActiveAttr.getValue()->getMapType(); return llvm::None; } @@ -176,7 +176,7 @@ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) { llvm::Optional OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) { llvm::Optional ActiveAttr = getActiveAttr(VD); - if (ActiveAttr.hasValue()) + if (ActiveAttr) return ActiveAttr.getValue()->getDevType(); return llvm::None; } @@ -184,7 +184,7 @@ OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) { llvm::Optional OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) { llvm::Optional ActiveAttr = getActiveAttr(VD); - if (ActiveAttr.hasValue()) + if (ActiveAttr) return ActiveAttr.getValue()->getRange().getBegin(); return llvm::None; } diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index 79d092acccec9cef5326022d683906cf987e090f..ac1642ef37acae5a361877a79fc1e286b384f5cf 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -203,6 +203,91 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const { return false; } +bool Expr::isFlexibleArrayMember(ASTContext &Ctx, + int StrictFlexArraysLevel) const { + const NamedDecl *ND = nullptr; + if (const DeclRefExpr *DRE = dyn_cast(this)) + ND = DRE->getDecl(); + if (const MemberExpr *ME = dyn_cast(this)) + ND = ME->getMemberDecl(); + if (const ObjCIvarRefExpr *RE = dyn_cast(this)) + ND = RE->getDecl(); + if (!ND) + return false; + + const ConstantArrayType *ArrayTy = Ctx.getAsConstantArrayType(getType()); + + const Type *BaseType = + ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr(); + bool IsUnboundedArray = (BaseType == nullptr); + + if (!IsUnboundedArray) { + llvm::APInt Size = ArrayTy->getSize(); + + switch (StrictFlexArraysLevel) { + case 3: + return false; + case 2: + if (Size != 0) + return false; + break; + case 1: + if (Size.ugt(1)) + return false; + break; + case 0: + break; + default: + llvm_unreachable("Invalid strict flex arrays level"); + } + } + + const FieldDecl *FD = dyn_cast(ND); + if (!FD) + return false; + + // Don't consider sizes resulting from macro expansions or template argument + // substitution to form C89 tail-padded arrays. + + TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); + while (TInfo) { + TypeLoc TL = TInfo->getTypeLoc(); + // Look through typedefs. 
+ if (TypedefTypeLoc TTL = TL.getAs()) { + const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); + TInfo = TDL->getTypeSourceInfo(); + continue; + } + if (ConstantArrayTypeLoc CTL = TL.getAs()) { + const Expr *SizeExpr = dyn_cast(CTL.getSizeExpr()); + if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) + return false; + } + break; + } + + const ObjCInterfaceDecl *ID = + dyn_cast(FD->getDeclContext()); + const RecordDecl *RD = dyn_cast(FD->getDeclContext()); + if (RD) { + if (RD->isUnion()) + return false; + if (const CXXRecordDecl *CRD = dyn_cast(RD)) { + if (!CRD->isStandardLayout()) + return false; + } + } else if (!ID) + return false; + + // See if this is the last field decl in the record. + const Decl *D = FD; + while ((D = D->getNextDeclInContext())) + if (isa(D)) + return false; + + return true; +} + const ValueDecl * Expr::getAsBuiltinConstantDeclRef(const ASTContext &Context) const { Expr::EvalResult Eval; diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 910f943c00adcfdbb7bd7d6f801ea311f6500872..c9ecfb170b04022bdcfdeba918152ccd6c2a1ff9 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -11592,9 +11592,16 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) { // conservative with the last element in structs (if it's an array), so our // current behavior is more compatible than an explicit list approach would // be. + int StrictFlexArraysLevel = Ctx.getLangOpts().StrictFlexArrays; return LVal.InvalidBase && Designator.Entries.size() == Designator.MostDerivedPathLength && Designator.MostDerivedIsArrayElement && + (Designator.isMostDerivedAnUnsizedArray() || + (Designator.getMostDerivedArraySize() == 0 && + StrictFlexArraysLevel < 3) || + (Designator.getMostDerivedArraySize() == 1 && + StrictFlexArraysLevel < 2) || + StrictFlexArraysLevel == 0) && isDesignatorAtObjectEnd(Ctx, LVal); } diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp index e84946d1f21eca335c8f01dc9b5c9a72b02292bd..09075e60142a7bb5af823c053c2a651920d4dde1 100644 --- a/clang/lib/AST/MicrosoftMangle.cpp +++ b/clang/lib/AST/MicrosoftMangle.cpp @@ -2461,7 +2461,12 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, break; case BuiltinType::Half: - mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"}); + if (!getASTContext().getLangOpts().HLSL) + mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"}); + else if (getASTContext().getLangOpts().NativeHalfType) + Out << "$f16@"; + else + Out << "$halff@"; break; #define SVE_TYPE(Name, Id, SingletonId) \ diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp index c3dd47cdea9bd541a949e39b0ebac29a8920f0e8..b702e3ec4210a2f7bc4661a2a00e88a17d978b2b 100644 --- a/clang/lib/AST/StmtOpenMP.cpp +++ b/clang/lib/AST/StmtOpenMP.cpp @@ -1176,6 +1176,51 @@ OMPMasterTaskLoopDirective::CreateEmpty(const ASTContext &C, numLoopChildren(CollapsedNum, OMPD_master_taskloop), CollapsedNum); } +OMPMaskedTaskLoopDirective *OMPMaskedTaskLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, bool HasCancel) { + auto *Dir = createDirective( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop), StartLoc, EndLoc, + CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + 
Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + Dir->setHasCancel(HasCancel); + return Dir; +} + +OMPMaskedTaskLoopDirective * +OMPMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, EmptyShell) { + return createEmptyDirective( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop), CollapsedNum); +} + OMPMasterTaskLoopSimdDirective *OMPMasterTaskLoopSimdDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef Clauses, Stmt *AssociatedStmt, diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 180155402d9de4e66074ef851a4942da6929b588..1fa37e9a1efe413d01966edf919b12265b415b4d 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -880,6 +880,12 @@ void StmtPrinter::VisitOMPMasterTaskLoopDirective( PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPMaskedTaskLoopDirective( + OMPMaskedTaskLoopDirective *Node) { + Indent() << "#pragma omp masked taskloop"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPMasterTaskLoopSimdDirective( OMPMasterTaskLoopSimdDirective *Node) { Indent() << "#pragma omp master taskloop simd"; diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 858d483a890dd4c0a27d6e54c6df2bcbe0d61d63..3676d58d0bddd72ad68a54814d6e3e3fd17a5303 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -1095,6 +1095,11 @@ void StmtProfiler::VisitOMPMasterTaskLoopDirective( VisitOMPLoopDirective(S); } +void StmtProfiler::VisitOMPMaskedTaskLoopDirective( + const OMPMaskedTaskLoopDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitOMPMasterTaskLoopSimdDirective( const OMPMasterTaskLoopSimdDirective *S) { VisitOMPLoopDirective(S); diff --git a/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/clang/lib/ASTMatchers/Dynamic/Parser.cpp index ec14f7abfdccfd21f2d7cb4a4c6d50eb1a147927..6470df27e6e238cb34c8b6f0cb99d6e3c1e772ec 100644 --- a/clang/lib/ASTMatchers/Dynamic/Parser.cpp +++ b/clang/lib/ASTMatchers/Dynamic/Parser.cpp @@ -397,9 +397,9 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) { assert(NamedValue.isMatcher()); llvm::Optional Result = NamedValue.getMatcher().getSingleMatcher(); - if (Result.hasValue()) { + if (Result) { llvm::Optional Bound = Result->tryBind(BindID); - if (Bound.hasValue()) { + if (Bound) { *Value = VariantMatcher::SingleMatcher(*Bound); return true; } diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp index 72629d0aa91e5c9da5705f3a2090d89da80fc34f..42193e65496dce63cf32f33312ce56862af00534 100644 --- 
a/clang/lib/ASTMatchers/Dynamic/Registry.cpp +++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp @@ -797,9 +797,9 @@ VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor, if (Out.isNull()) return Out; llvm::Optional Result = Out.getSingleMatcher(); - if (Result.hasValue()) { + if (Result) { llvm::Optional Bound = Result->tryBind(BindID); - if (Bound.hasValue()) { + if (Bound) { return VariantMatcher::SingleMatcher(*Bound); } } diff --git a/clang/lib/Analysis/BodyFarm.cpp b/clang/lib/Analysis/BodyFarm.cpp index 3587cf4678af8232cebaeb42de2487e0ba94e21d..8de69275a4529a55d9c6ef3cff755d30e1d5c8a3 100644 --- a/clang/lib/Analysis/BodyFarm.cpp +++ b/clang/lib/Analysis/BodyFarm.cpp @@ -697,7 +697,7 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D) Stmt *BodyFarm::getBody(const FunctionDecl *D) { Optional &Val = Bodies[D]; - if (Val.hasValue()) + if (Val) return Val.getValue(); Val = nullptr; @@ -872,7 +872,7 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) { return nullptr; Optional &Val = Bodies[D]; - if (Val.hasValue()) + if (Val) return Val.getValue(); Val = nullptr; diff --git a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp index c0b8119038f2d3be93b3cc4219a348438b98efc8..fe9907a7c99b072ad97fb06db412934eae6efc7f 100644 --- a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp +++ b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp @@ -33,7 +33,7 @@ buildStmtToBasicBlockMap(const CFG &Cfg) { for (const CFGElement &Element : *Block) { auto Stmt = Element.getAs(); - if (!Stmt.hasValue()) + if (!Stmt) continue; StmtToBlock[Stmt.getValue().getStmt()] = Block; diff --git a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp index 81e37e6e6905af8aba4d82604c5a2c66d978ae08..4c7f0d1f94fa7620edf30f478f0c3af836b31a73 100644 --- a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp +++ b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp @@ -22,6 +22,52 @@ namespace clang { namespace dataflow { +StorageLocation & +DataflowAnalysisContext::getStableStorageLocation(QualType Type) { + assert(!Type.isNull()); + if (Type->isStructureOrClassType() || Type->isUnionType()) { + // FIXME: Explore options to avoid eager initialization of fields as some of + // them might not be needed for a particular analysis. 
+ llvm::DenseMap FieldLocs; + for (const FieldDecl *Field : getObjectFields(Type)) + FieldLocs.insert({Field, &getStableStorageLocation(Field->getType())}); + return takeOwnership( + std::make_unique(Type, std::move(FieldLocs))); + } + return takeOwnership(std::make_unique(Type)); +} + +StorageLocation & +DataflowAnalysisContext::getStableStorageLocation(const VarDecl &D) { + if (auto *Loc = getStorageLocation(D)) + return *Loc; + auto &Loc = getStableStorageLocation(D.getType()); + setStorageLocation(D, Loc); + return Loc; +} + +StorageLocation & +DataflowAnalysisContext::getStableStorageLocation(const Expr &E) { + if (auto *Loc = getStorageLocation(E)) + return *Loc; + auto &Loc = getStableStorageLocation(E.getType()); + setStorageLocation(E, Loc); + return Loc; +} + +PointerValue & +DataflowAnalysisContext::getOrCreateNullPointerValue(QualType PointeeType) { + assert(!PointeeType.isNull()); + auto CanonicalPointeeType = PointeeType.getCanonicalType(); + auto Res = NullPointerVals.try_emplace(CanonicalPointeeType, nullptr); + if (Res.second) { + auto &PointeeLoc = getStableStorageLocation(CanonicalPointeeType); + Res.first->second = + &takeOwnership(std::make_unique(PointeeLoc)); + } + return *Res.first->second; +} + static std::pair makeCanonicalBoolValuePair(BoolValue &LHS, BoolValue &RHS) { auto Res = std::make_pair(&LHS, &RHS); @@ -30,9 +76,8 @@ makeCanonicalBoolValuePair(BoolValue &LHS, BoolValue &RHS) { return Res; } -BoolValue & -DataflowAnalysisContext::getOrCreateConjunctionValue(BoolValue &LHS, - BoolValue &RHS) { +BoolValue &DataflowAnalysisContext::getOrCreateConjunction(BoolValue &LHS, + BoolValue &RHS) { if (&LHS == &RHS) return LHS; @@ -44,9 +89,8 @@ DataflowAnalysisContext::getOrCreateConjunctionValue(BoolValue &LHS, return *Res.first->second; } -BoolValue & -DataflowAnalysisContext::getOrCreateDisjunctionValue(BoolValue &LHS, - BoolValue &RHS) { +BoolValue &DataflowAnalysisContext::getOrCreateDisjunction(BoolValue &LHS, + BoolValue &RHS) { if (&LHS == &RHS) return LHS; @@ -58,27 +102,37 @@ DataflowAnalysisContext::getOrCreateDisjunctionValue(BoolValue &LHS, return *Res.first->second; } -BoolValue &DataflowAnalysisContext::getOrCreateNegationValue(BoolValue &Val) { +BoolValue &DataflowAnalysisContext::getOrCreateNegation(BoolValue &Val) { auto Res = NegationVals.try_emplace(&Val, nullptr); if (Res.second) Res.first->second = &takeOwnership(std::make_unique(Val)); return *Res.first->second; } +BoolValue &DataflowAnalysisContext::getOrCreateImplication(BoolValue &LHS, + BoolValue &RHS) { + return &LHS == &RHS ? getBoolLiteralValue(true) + : getOrCreateDisjunction(getOrCreateNegation(LHS), RHS); +} + +BoolValue &DataflowAnalysisContext::getOrCreateIff(BoolValue &LHS, + BoolValue &RHS) { + return &LHS == &RHS + ? 
getBoolLiteralValue(true) + : getOrCreateConjunction(getOrCreateImplication(LHS, RHS), + getOrCreateImplication(RHS, LHS)); +} + AtomicBoolValue &DataflowAnalysisContext::makeFlowConditionToken() { - AtomicBoolValue &Token = createAtomicBoolValue(); - FlowConditionRemainingConjuncts[&Token] = {}; - FlowConditionFirstConjuncts[&Token] = &Token; - return Token; + return createAtomicBoolValue(); } void DataflowAnalysisContext::addFlowConditionConstraint( AtomicBoolValue &Token, BoolValue &Constraint) { - FlowConditionRemainingConjuncts[&Token].insert(&getOrCreateDisjunctionValue( - Constraint, getOrCreateNegationValue(Token))); - FlowConditionFirstConjuncts[&Token] = - &getOrCreateDisjunctionValue(*FlowConditionFirstConjuncts[&Token], - getOrCreateNegationValue(Constraint)); + auto Res = FlowConditionConstraints.try_emplace(&Token, &Constraint); + if (!Res.second) { + Res.first->second = &getOrCreateConjunction(*Res.first->second, Constraint); + } } AtomicBoolValue & @@ -95,11 +149,18 @@ DataflowAnalysisContext::joinFlowConditions(AtomicBoolValue &FirstToken, auto &Token = makeFlowConditionToken(); FlowConditionDeps[&Token].insert(&FirstToken); FlowConditionDeps[&Token].insert(&SecondToken); - addFlowConditionConstraint( - Token, getOrCreateDisjunctionValue(FirstToken, SecondToken)); + addFlowConditionConstraint(Token, + getOrCreateDisjunction(FirstToken, SecondToken)); return Token; } +Solver::Result +DataflowAnalysisContext::querySolver(llvm::DenseSet Constraints) { + Constraints.insert(&getBoolLiteralValue(true)); + Constraints.insert(&getOrCreateNegation(getBoolLiteralValue(false))); + return S->solve(std::move(Constraints)); +} + bool DataflowAnalysisContext::flowConditionImplies(AtomicBoolValue &Token, BoolValue &Val) { // Returns true if and only if truth assignment of the flow condition implies @@ -107,51 +168,121 @@ bool DataflowAnalysisContext::flowConditionImplies(AtomicBoolValue &Token, // reducing the problem to satisfiability checking. In other words, we attempt // to show that assuming `Val` is false makes the constraints induced by the // flow condition unsatisfiable. - llvm::DenseSet Constraints = { - &Token, - &getBoolLiteralValue(true), - &getOrCreateNegationValue(getBoolLiteralValue(false)), - &getOrCreateNegationValue(Val), - }; + llvm::DenseSet Constraints = {&Token, &getOrCreateNegation(Val)}; llvm::DenseSet VisitedTokens; addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens); - return S->solve(std::move(Constraints)) == Solver::Result::Unsatisfiable; + return isUnsatisfiable(std::move(Constraints)); } bool DataflowAnalysisContext::flowConditionIsTautology(AtomicBoolValue &Token) { // Returns true if and only if we cannot prove that the flow condition can // ever be false. 
 bool DataflowAnalysisContext::flowConditionIsTautology(AtomicBoolValue &Token) {
   // Returns true if and only if we cannot prove that the flow condition can
   // ever be false.
-  llvm::DenseSet<BoolValue *> Constraints = {
-      &getBoolLiteralValue(true),
-      &getOrCreateNegationValue(getBoolLiteralValue(false)),
-      &getOrCreateNegationValue(Token),
-  };
+  llvm::DenseSet<BoolValue *> Constraints = {&getOrCreateNegation(Token)};
   llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
   addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
-  return S->solve(std::move(Constraints)) == Solver::Result::Unsatisfiable;
+  return isUnsatisfiable(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::equivalentBoolValues(BoolValue &Val1,
+                                                   BoolValue &Val2) {
+  llvm::DenseSet<BoolValue *> Constraints = {
+      &getOrCreateNegation(getOrCreateIff(Val1, Val2))};
+  return isUnsatisfiable(Constraints);
 }
 
 void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
     AtomicBoolValue &Token, llvm::DenseSet<BoolValue *> &Constraints,
-    llvm::DenseSet<AtomicBoolValue *> &VisitedTokens) const {
+    llvm::DenseSet<AtomicBoolValue *> &VisitedTokens) {
   auto Res = VisitedTokens.insert(&Token);
   if (!Res.second)
     return;
 
-  auto FirstConjunctIT = FlowConditionFirstConjuncts.find(&Token);
-  if (FirstConjunctIT != FlowConditionFirstConjuncts.end())
-    Constraints.insert(FirstConjunctIT->second);
-  auto RemainingConjunctsIT = FlowConditionRemainingConjuncts.find(&Token);
-  if (RemainingConjunctsIT != FlowConditionRemainingConjuncts.end())
-    Constraints.insert(RemainingConjunctsIT->second.begin(),
-                       RemainingConjunctsIT->second.end());
+  auto ConstraintsIT = FlowConditionConstraints.find(&Token);
+  if (ConstraintsIT == FlowConditionConstraints.end()) {
+    Constraints.insert(&Token);
+  } else {
+    // Bind flow condition token via `iff` to its set of constraints:
+    // FC <=> (C1 ^ C2 ^ ...), where Ci are constraints
+    Constraints.insert(&getOrCreateIff(Token, *ConstraintsIT->second));
+  }
 
   auto DepsIT = FlowConditionDeps.find(&Token);
   if (DepsIT != FlowConditionDeps.end()) {
-    for (AtomicBoolValue *DepToken : DepsIT->second)
+    for (AtomicBoolValue *DepToken : DepsIT->second) {
       addTransitiveFlowConditionConstraints(*DepToken, Constraints,
                                             VisitedTokens);
+    }
+  }
+}
+
+BoolValue &DataflowAnalysisContext::substituteBoolValue(
+    BoolValue &Val,
+    llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
+  auto IT = SubstitutionsCache.find(&Val);
+  if (IT != SubstitutionsCache.end()) {
+    return *IT->second;
+  }
+  BoolValue *Result;
+  switch (Val.getKind()) {
+  case Value::Kind::AtomicBool: {
+    Result = &Val;
+    break;
+  }
+  case Value::Kind::Negation: {
+    auto &Negation = *cast<NegationValue>(&Val);
+    auto &Sub = substituteBoolValue(Negation.getSubVal(), SubstitutionsCache);
+    Result = &getOrCreateNegation(Sub);
+    break;
+  }
+  case Value::Kind::Disjunction: {
+    auto &Disjunct = *cast<DisjunctionValue>(&Val);
+    auto &LeftSub =
+        substituteBoolValue(Disjunct.getLeftSubValue(), SubstitutionsCache);
+    auto &RightSub =
+        substituteBoolValue(Disjunct.getRightSubValue(), SubstitutionsCache);
+    Result = &getOrCreateDisjunction(LeftSub, RightSub);
+    break;
+  }
+  case Value::Kind::Conjunction: {
+    auto &Conjunct = *cast<ConjunctionValue>(&Val);
+    auto &LeftSub =
+        substituteBoolValue(Conjunct.getLeftSubValue(), SubstitutionsCache);
+    auto &RightSub =
+        substituteBoolValue(Conjunct.getRightSubValue(), SubstitutionsCache);
+    Result = &getOrCreateConjunction(LeftSub, RightSub);
+    break;
+  }
+  default:
+    llvm_unreachable("Unhandled Value Kind");
+  }
+  SubstitutionsCache[&Val] = Result;
+  return *Result;
+}
+
+BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowCondition(
+    AtomicBoolValue &Token,
+    llvm::DenseMap<AtomicBoolValue *, BoolValue *> Substitutions) {
+  llvm::DenseMap<BoolValue *, BoolValue *> SubstitutionsCache(
+      Substitutions.begin(), Substitutions.end());
+  return buildAndSubstituteFlowConditionWithCache(Token, SubstitutionsCache);
+}
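
`substituteBoolValue` is a structural rewrite with a memo table: `buildAndSubstituteFlowCondition` pre-seeds `SubstitutionsCache` with the atom replacements, and shared subterms are then rewritten exactly once. A toy sketch of the same memoized traversal, assuming a hypothetical `Term` type with atoms and negation only (the real code also handles conjunction and disjunction):

```cpp
#include <map>
#include <memory>
#include <string>

// Toy boolean term: an atom (non-empty Atom) or a negation (Sub set).
struct Term;
using TermPtr = std::shared_ptr<Term>;
struct Term {
  std::string Atom;
  TermPtr Sub;
};

// Memoized rewrite, mirroring substituteBoolValue: callers pre-seed Cache
// with Atom -> Replacement entries; shared subterms are visited only once.
TermPtr substitute(const TermPtr &T, std::map<Term *, TermPtr> &Cache) {
  if (auto It = Cache.find(T.get()); It != Cache.end())
    return It->second; // A pre-seeded substitution or a memoized result.
  TermPtr Result;
  if (T->Sub)
    Result = std::make_shared<Term>(Term{"", substitute(T->Sub, Cache)});
  else
    Result = T; // Atoms without a pre-seeded replacement map to themselves.
  Cache[T.get()] = Result;
  return Result;
}
```
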
+
+BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowConditionWithCache(
+    AtomicBoolValue &Token,
+    llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
+  auto ConstraintsIT = FlowConditionConstraints.find(&Token);
+  if (ConstraintsIT == FlowConditionConstraints.end()) {
+    return getBoolLiteralValue(true);
+  }
+  auto DepsIT = FlowConditionDeps.find(&Token);
+  if (DepsIT != FlowConditionDeps.end()) {
+    for (AtomicBoolValue *DepToken : DepsIT->second) {
+      auto &NewDep = buildAndSubstituteFlowConditionWithCache(
+          *DepToken, SubstitutionsCache);
+      SubstitutionsCache[DepToken] = &NewDep;
+    }
   }
+  return substituteBoolValue(*ConstraintsIT->second, SubstitutionsCache);
 }
 
 } // namespace dataflow
@@ -175,3 +306,27 @@ const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
     return ignoreCFGOmittedNodes(*E);
   return S;
 }
+
+// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
+// field decl will be modeled for all instances of the inherited field.
+static void
+getFieldsFromClassHierarchy(QualType Type,
+                            llvm::DenseSet<const FieldDecl *> &Fields) {
+  if (Type->isIncompleteType() || Type->isDependentType() ||
+      !Type->isRecordType())
+    return;
+
+  for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
+    Fields.insert(Field);
+  if (auto *CXXRecord = Type->getAsCXXRecordDecl())
+    for (const CXXBaseSpecifier &Base : CXXRecord->bases())
+      getFieldsFromClassHierarchy(Base.getType(), Fields);
+}
+
+/// Gets the set of all fields in the type.
+llvm::DenseSet<const FieldDecl *>
+clang::dataflow::getObjectFields(QualType Type) {
+  llvm::DenseSet<const FieldDecl *> Fields;
+  getFieldsFromClassHierarchy(Type, Fields);
+  return Fields;
+}
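
For a concrete picture of what `getObjectFields` collects, consider a hypothetical hierarchy (not from the patch): the recursion gathers the record's own fields and then walks each `CXXBaseSpecifier`:

```cpp
// Hypothetical illustration of the traversal above.
struct Base {
  int A; // Found via the base-class recursion.
};
struct Derived : Base {
  int B; // Found via Derived's own fields().
};
// getObjectFields(<QualType of Derived>) yields {Base::A, Derived::B}.
// Per the FIXME, a field reachable twice through non-virtual diamond
// inheritance is still modeled by a single FieldDecl.
```
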
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index e8d3a4e6d4505c817b4370cc44a9e99eb6139d18..3aea670f20aa24ecbe0a8b168506731e2e23145e 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -152,29 +152,6 @@ static void initGlobalVars(const Stmt &S, Environment &Env) {
   }
 }
 
-// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
-// field decl will be modeled for all instances of the inherited field.
-static void
-getFieldsFromClassHierarchy(QualType Type,
-                            llvm::DenseSet<const FieldDecl *> &Fields) {
-  if (Type->isIncompleteType() || Type->isDependentType() ||
-      !Type->isRecordType())
-    return;
-
-  for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
-    Fields.insert(Field);
-  if (auto *CXXRecord = Type->getAsCXXRecordDecl())
-    for (const CXXBaseSpecifier &Base : CXXRecord->bases())
-      getFieldsFromClassHierarchy(Base.getType(), Fields);
-}
-
-/// Gets the set of all fields in the type.
-static llvm::DenseSet<const FieldDecl *> getObjectFields(QualType Type) {
-  llvm::DenseSet<const FieldDecl *> Fields;
-  getFieldsFromClassHierarchy(Type, Fields);
-  return Fields;
-}
-
 Environment::Environment(DataflowAnalysisContext &DACtx)
     : DACtx(&DACtx), FlowConditionToken(&DACtx.makeFlowConditionToken()) {}
 
@@ -310,39 +287,21 @@ LatticeJoinEffect Environment::join(const Environment &Other,
 }
 
 StorageLocation &Environment::createStorageLocation(QualType Type) {
-  assert(!Type.isNull());
-  if (Type->isStructureOrClassType() || Type->isUnionType()) {
-    // FIXME: Explore options to avoid eager initialization of fields as some of
-    // them might not be needed for a particular analysis.
-    llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
-    for (const FieldDecl *Field : getObjectFields(Type))
-      FieldLocs.insert({Field, &createStorageLocation(Field->getType())});
-    return takeOwnership(
-        std::make_unique<AggregateStorageLocation>(Type, std::move(FieldLocs)));
-  }
-  return takeOwnership(std::make_unique<ScalarStorageLocation>(Type));
+  return DACtx->getStableStorageLocation(Type);
 }
 
 StorageLocation &Environment::createStorageLocation(const VarDecl &D) {
   // Evaluated declarations are always assigned the same storage locations to
   // ensure that the environment stabilizes across loop iterations. Storage
   // locations for evaluated declarations are stored in the analysis context.
-  if (auto *Loc = DACtx->getStorageLocation(D))
-    return *Loc;
-  auto &Loc = createStorageLocation(D.getType());
-  DACtx->setStorageLocation(D, Loc);
-  return Loc;
+  return DACtx->getStableStorageLocation(D);
 }
 
 StorageLocation &Environment::createStorageLocation(const Expr &E) {
   // Evaluated expressions are always assigned the same storage locations to
   // ensure that the environment stabilizes across loop iterations. Storage
   // locations for evaluated expressions are stored in the analysis context.
-  if (auto *Loc = DACtx->getStorageLocation(E))
-    return *Loc;
-  auto &Loc = createStorageLocation(E.getType());
-  DACtx->setStorageLocation(E, Loc);
-  return Loc;
+  return DACtx->getStableStorageLocation(E);
 }
 
 void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
@@ -373,6 +332,10 @@ StorageLocation *Environment::getThisPointeeStorageLocation() const {
   return DACtx->getThisPointeeStorageLocation();
 }
 
+PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
+  return DACtx->getOrCreateNullPointerValue(PointeeType);
+}
+
 void Environment::setValue(const StorageLocation &Loc, Value &Val) {
   LocToVal[&Loc] = &Val;
 
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index befd3c626311475f432f8ea840db0d66aa76367f..500e1a7a939032d2d668dfaf8ae9f7030316ef5d 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -251,6 +251,16 @@ public:
       Env.setStorageLocation(*S, *SubExprLoc);
       break;
     }
+    case CK_NullToPointer:
+    case CK_NullToMemberPointer: {
+      auto &Loc = Env.createStorageLocation(S->getType());
+      Env.setStorageLocation(*S, Loc);
+
+      auto &NullPointerVal =
+          Env.getOrCreateNullPointerValue(S->getType()->getPointeeType());
+      Env.setValue(Loc, NullPointerVal);
+      break;
+    }
     default:
       break;
     }
diff --git a/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index 68e897e03596287e1d0eae6781819613a2c29c07..ddd9daaff6bb5b654eefddbe26c2263cb35035ea 100644
--- a/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -50,7 +50,7 @@ public:
     auto BlockIT = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
     assert(BlockIT != CFCtx.getStmtToBlock().end());
     const auto &State = BlockToState[BlockIT->getSecond()->getBlockID()];
-    assert(State.hasValue());
+    assert(State);
     return &State.getValue().Env;
   }
 
@@ -209,7 +209,7 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
     // loop back edge to `Block`.
     const llvm::Optional<TypeErasedDataflowAnalysisState> &MaybePredState =
         BlockStates[Pred->getBlockID()];
-    if (!MaybePredState.hasValue())
+    if (!MaybePredState)
       continue;
 
     TypeErasedDataflowAnalysisState PredState = MaybePredState.getValue();
@@ -222,14 +222,14 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
       }
     }
 
-    if (MaybeState.hasValue()) {
+    if (MaybeState) {
       Analysis.joinTypeErased(MaybeState->Lattice, PredState.Lattice);
       MaybeState->Env.join(PredState.Env, Analysis);
     } else {
       MaybeState = std::move(PredState);
     }
   }
-  if (!MaybeState.hasValue()) {
+  if (!MaybeState) {
     // FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
     // to enable building analyses like computation of dominators that
     // initialize the state of each basic block differently.
@@ -367,7 +367,7 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
     TypeErasedDataflowAnalysisState NewBlockState =
         transferBlock(CFCtx, BlockStates, *Block, InitEnv, Analysis);
 
-    if (OldBlockState.hasValue() &&
+    if (OldBlockState &&
         Analysis.isEqualTypeErased(OldBlockState.getValue().Lattice,
                                    NewBlockState.Lattice) &&
         OldBlockState->Env.equivalentTo(NewBlockState.Env, Analysis)) {
diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp
index 90c4624018069e184b27d399b08f181b185f21cd..8a7305000746ee69cf8966992fa9302aff15b103 100644
--- a/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/clang/lib/Analysis/PathDiagnostic.cpp
@@ -319,7 +319,7 @@ static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
   for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
     Optional<bool> b = comparePiece(**X_I, **Y_I);
-    if (b.hasValue())
+    if (b)
       return b.getValue();
   }
 
@@ -396,7 +396,7 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
     return (*XI) < (*YI);
   }
   Optional<bool> b = comparePath(X.path, Y.path);
-  assert(b.hasValue());
+  assert(b);
   return b.getValue();
 }
 
diff --git a/clang/lib/Analysis/UninitializedValues.cpp b/clang/lib/Analysis/UninitializedValues.cpp
index 811146e50b45a13fc0392e935953bbfe93ae8f90..800943a99d8710d611e55fbc55f15772f9a46bab 100644
--- a/clang/lib/Analysis/UninitializedValues.cpp
+++ b/clang/lib/Analysis/UninitializedValues.cpp
@@ -148,7 +148,7 @@ public:
   Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
                  const VarDecl *vd) {
     const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
-    assert(idx.hasValue());
+    assert(idx);
     return getValueVector(block)[idx.getValue()];
   }
 };
@@ -209,7 +209,7 @@ void CFGBlockValues::resetScratch() {
 
 ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
   const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
-  assert(idx.hasValue());
+  assert(idx);
   return scratch[idx.getValue()];
 }
 
diff --git a/clang/lib/Basic/Diagnostic.cpp b/clang/lib/Basic/Diagnostic.cpp
index d14134f99ee95057c8732833e245ea42fce64bc8..deb398756e3731c4e0da59b2007ba023e760d16f 100644
--- a/clang/lib/Basic/Diagnostic.cpp
+++ b/clang/lib/Basic/Diagnostic.cpp
@@ -130,7 +130,7 @@ bool DiagnosticsEngine::popMappings(SourceLocation Loc) {
   return true;
 }
 
-void DiagnosticsEngine::Reset() {
+void DiagnosticsEngine::Reset(bool soft /*=false*/) {
   ErrorOccurred = false;
   UncompilableErrorOccurred = false;
   FatalErrorOccurred = false;
@@ -145,15 +145,17 @@ void DiagnosticsEngine::Reset() {
   LastDiagLevel = DiagnosticIDs::Ignored;
   DelayedDiagID = 0;
 
-  // Clear state related to #pragma diagnostic.
-  DiagStates.clear();
-  DiagStatesByLoc.clear();
-  DiagStateOnPushStack.clear();
+  if (!soft) {
+    // Clear state related to #pragma diagnostic.
+ DiagStates.clear(); + DiagStatesByLoc.clear(); + DiagStateOnPushStack.clear(); - // Create a DiagState and DiagStatePoint representing diagnostic changes - // through command-line. - DiagStates.emplace_back(); - DiagStatesByLoc.appendFirst(&DiagStates.back()); + // Create a DiagState and DiagStatePoint representing diagnostic changes + // through command-line. + DiagStates.emplace_back(); + DiagStatesByLoc.appendFirst(&DiagStates.back()); + } } void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1, diff --git a/clang/lib/Basic/LangOptions.cpp b/clang/lib/Basic/LangOptions.cpp index 7549f3f2e23b4fc48a9f838bf0929912f6ede0fa..0bf410c053ed9319d317ef2b2bfcaa6ee1edcdeb 100644 --- a/clang/lib/Basic/LangOptions.cpp +++ b/clang/lib/Basic/LangOptions.cpp @@ -195,8 +195,8 @@ void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang, // OpenCL, C++ and C2x have bool, true, false keywords. Opts.Bool = Opts.OpenCL || Opts.CPlusPlus || Opts.C2x; - // OpenCL has half keyword - Opts.Half = Opts.OpenCL; + // OpenCL and HLSL have half keyword + Opts.Half = Opts.OpenCL || Opts.HLSL; } FPOptions FPOptions::defaultWithoutTrailingStorage(const LangOptions &LO) { diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp index 54460981c8f7e2ad5112212f64a1593139432f1e..0c0d5701f0685d59c18c8c87cb01241931b7eb02 100644 --- a/clang/lib/Basic/OpenMPKinds.cpp +++ b/clang/lib/Basic/OpenMPKinds.cpp @@ -484,6 +484,7 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) { DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd || DKind == OMPD_parallel_master_taskloop || DKind == OMPD_parallel_master_taskloop_simd || + DKind == OMPD_masked_taskloop || DKind == OMPD_distribute || DKind == OMPD_target_parallel_for || DKind == OMPD_distribute_parallel_for || DKind == OMPD_distribute_parallel_for_simd || @@ -521,6 +522,7 @@ bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) { return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd || DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd || DKind == OMPD_parallel_master_taskloop || + DKind == OMPD_masked_taskloop || DKind == OMPD_parallel_master_taskloop_simd; } @@ -700,6 +702,7 @@ void clang::getOpenMPCaptureRegions( case OMPD_taskloop_simd: case OMPD_master_taskloop: case OMPD_master_taskloop_simd: + case OMPD_masked_taskloop: CaptureRegions.push_back(OMPD_taskloop); break; case OMPD_parallel_master_taskloop: diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp index 38862056227f16f68271000a46fe2d0e164a01d1..60ef52ac3f0ddbdd236d7ff26ad4779097060710 100644 --- a/clang/lib/Basic/Targets/AArch64.cpp +++ b/clang/lib/Basic/Targets/AArch64.cpp @@ -485,6 +485,10 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); + // Allow detection of fast FMA support. 
+ Builder.defineMacro("__FP_FAST_FMA", "1"); + Builder.defineMacro("__FP_FAST_FMAF", "1"); + if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) { Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128)); Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS"); diff --git a/clang/lib/Basic/Targets/DirectX.h b/clang/lib/Basic/Targets/DirectX.h index 90ef1dfa55c89843c7e517795977b040a9737719..a773090b413f7210aef3238a7f617e13bb5f6966 100644 --- a/clang/lib/Basic/Targets/DirectX.h +++ b/clang/lib/Basic/Targets/DirectX.h @@ -57,8 +57,9 @@ public: NoAsmVariants = true; resetDataLayout("e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:" "32-f64:64-n8:16:32:64"); + TheCXXABI.set(TargetCXXABI::Microsoft); } - + bool useFP16ConversionIntrinsics() const override { return false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index d0c171fa349d77434f82db6358fa9dccfb1dc7b3..ce6b769a5e699c7897625f40ebb759b8bbe33aa9 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -264,7 +264,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const { .Case("riscv64", Is64Bit) .Case("64bit", Is64Bit) .Default(None); - if (Result.hasValue()) + if (Result) return Result.getValue(); if (ISAInfo->isSupportedExtensionFeature(Feature)) diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 7cfe2204bf7283908aed94847dd5c60cbf05e2c7..d43c4eef0401bd00acc7d6374a1628be6d282742 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -19011,15 +19011,40 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, SmallVector Ops; llvm::Type *ResultType = ConvertType(E->getType()); + // Find out if any arguments are required to be integer constant expressions. + unsigned ICEArguments = 0; + ASTContext::GetBuiltinTypeError Error; + getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); + if (Error == ASTContext::GE_Missing_type) { + // Vector intrinsics don't have a type string. + assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin && + BuiltinID <= clang::RISCV::LastRVVBuiltin); + ICEArguments = 0; + if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v || + BuiltinID == RISCVVector::BI__builtin_rvv_vset_v) + ICEArguments = 1 << 1; + } else { + assert(Error == ASTContext::GE_None && "Unexpected error"); + } + for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { - const Expr *Arg = E->getArg(i); - if (hasAggregateEvaluationKind(Arg->getType())) { - LValue L = EmitAggExprToLValue(Arg); - llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this)); - Ops.push_back(AggValue); - } else { - Ops.push_back(EmitScalarExpr(Arg)); + if ((ICEArguments & (1 << i)) == 0) { + const Expr *Arg = E->getArg(i); + if (hasAggregateEvaluationKind(Arg->getType())) { + LValue L = EmitAggExprToLValue(Arg); + llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this)); + Ops.push_back(AggValue); + } else { + // If this is a normal argument, just emit it as a scalar. + Ops.push_back(EmitScalarExpr(Arg)); + } + continue; } + + // If this is required to be a constant, constant fold it so that we know + // that the generated intrinsic gets a ConstantInt. 
+ Ops.push_back(llvm::ConstantInt::get( + getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext()))); } Intrinsic::ID ID = Intrinsic::not_intrinsic; diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 0239ea0758d60e1bad6a170b00488dbbe901b429..ad016d6f3aa90782f570fb65d30e792fb1d48e56 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1289,8 +1289,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, } // If coercing a fixed vector to a scalable vector for ABI compatibility, and - // the types match, use the llvm.experimental.vector.insert intrinsic to - // perform the conversion. + // the types match, use the llvm.vector.insert intrinsic to perform the + // conversion. if (auto *ScalableDst = dyn_cast(Ty)) { if (auto *FixedSrc = dyn_cast(SrcTy)) { // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate @@ -2934,8 +2934,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, // VLST arguments are coerced to VLATs at the function boundary for // ABI consistency. If this is a VLST that was coerced to // a VLAT at the function boundary and the types match up, use - // llvm.experimental.vector.extract to convert back to the original - // VLST. + // llvm.vector.extract to convert back to the original VLST. if (auto *VecTyTo = dyn_cast(ConvertType(Ty))) { llvm::Value *Coerced = Fn->getArg(FirstIRArg); if (auto *VecTyFrom = diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp index b4991f14ba193024fd3e33436ec83abc14f09261..de5cb913220a0a5d6572443d80c8ee22ed79d22e 100644 --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -707,7 +707,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() { // dynamic resource allocation on the device and program scope variables are // destroyed by the runtime when program is released. if (getLangOpts().OpenCL) { - GenOpenCLArgMetadata(Fn); + GenKernelArgMetadata(Fn); Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL); } diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index d6ff40edbe17c7f3581fb2fe610908e2a22b4daa..c5622fdb06f0d1e791b2e232267e4dbdbb2e31cc 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -875,44 +875,6 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, } } -/// Determine whether this expression refers to a flexible array member in a -/// struct. We disable array bounds checks for such members. -static bool isFlexibleArrayMemberExpr(const Expr *E) { - // For compatibility with existing code, we treat arrays of length 0 or - // 1 as flexible array members. - // FIXME: This is inconsistent with the warning code in SemaChecking. Unify - // the two mechanisms. - const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe(); - if (const auto *CAT = dyn_cast(AT)) { - // FIXME: Sema doesn't treat [1] as a flexible array member if the bound - // was produced by macro expansion. - if (CAT->getSize().ugt(1)) - return false; - } else if (!isa(AT)) - return false; - - E = E->IgnoreParens(); - - // A flexible array member must be the last member in the class. - if (const auto *ME = dyn_cast(E)) { - // FIXME: If the base type of the member expr is not FD->getParent(), - // this should not be treated as a flexible array member access. - if (const auto *FD = dyn_cast(ME->getMemberDecl())) { - // FIXME: Sema doesn't treat a T[1] union member as a flexible array - // member, only a T[0] or T[] member gets that treatment. 
- if (FD->getParent()->isUnion()) - return true; - RecordDecl::field_iterator FI( - DeclContext::decl_iterator(const_cast(FD))); - return ++FI == FD->getParent()->field_end(); - } - } else if (const auto *IRE = dyn_cast(E)) { - return IRE->getDecl()->getNextIvar() == nullptr; - } - - return false; -} - llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E, QualType EltTy) { ASTContext &C = getContext(); @@ -954,8 +916,11 @@ llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E, /// If Base is known to point to the start of an array, return the length of /// that array. Return 0 if the length cannot be determined. -static llvm::Value *getArrayIndexingBound( - CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) { +static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF, + const Expr *Base, + QualType &IndexedType, + ASTContext &Context, + int StrictFlexArraysLevel) { // For the vector indexing extension, the bound is the number of elements. if (const VectorType *VT = Base->getType()->getAs()) { IndexedType = Base->getType(); @@ -966,7 +931,8 @@ static llvm::Value *getArrayIndexingBound( if (const auto *CE = dyn_cast(Base)) { if (CE->getCastKind() == CK_ArrayToPointerDecay && - !isFlexibleArrayMemberExpr(CE->getSubExpr())) { + !CE->getSubExpr()->IgnoreParens()->isFlexibleArrayMember( + Context, std::max(StrictFlexArraysLevel, 1))) { IndexedType = CE->getSubExpr()->getType(); const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe(); if (const auto *CAT = dyn_cast(AT)) @@ -994,7 +960,8 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base, SanitizerScope SanScope(this); QualType IndexedType; - llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType); + llvm::Value *Bound = getArrayIndexingBound( + *this, Base, IndexedType, getContext(), getLangOpts().StrictFlexArrays); if (!Bound) return; diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 98d8e8b4ad88dbf4116feb7926128dcf9ff44638..b150aaa376eb081378cbaf72222666d4f4497303 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2084,8 +2084,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } // If Src is a fixed vector and Dst is a scalable vector, and both have the - // same element type, use the llvm.experimental.vector.insert intrinsic to - // perform the bitcast. + // same element type, use the llvm.vector.insert intrinsic to perform the + // bitcast. if (const auto *FixedSrc = dyn_cast(SrcTy)) { if (const auto *ScalableDst = dyn_cast(DstTy)) { // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate @@ -2112,8 +2112,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } // If Src is a scalable vector and Dst is a fixed vector, and both have the - // same element type, use the llvm.experimental.vector.extract intrinsic to - // perform the bitcast. + // same element type, use the llvm.vector.extract intrinsic to perform the + // bitcast. 
if (const auto *ScalableSrc = dyn_cast(SrcTy)) { if (const auto *FixedDst = dyn_cast(DstTy)) { // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index f3564eb78707fcf62322644c79951c4d417b362d..226ee1d8c2577fe8b6ac755f59cf9bb3fe99e61a 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -314,6 +314,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef Attrs) { case Stmt::OMPMasterTaskLoopDirectiveClass: EmitOMPMasterTaskLoopDirective(cast(*S)); break; + case Stmt::OMPMaskedTaskLoopDirectiveClass: + llvm_unreachable("masked taskloop directive not supported yet."); + break; case Stmt::OMPMasterTaskLoopSimdDirectiveClass: EmitOMPMasterTaskLoopSimdDirective( cast(*S)); diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp index 564d9f354e64658d8509328199724d0b0db30c9d..ebac9196df025246ed6b4cacb1e1c2c56aea1dc4 100644 --- a/clang/lib/CodeGen/CGVTT.cpp +++ b/clang/lib/CodeGen/CGVTT.cpp @@ -96,9 +96,6 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, if (CGM.supportsCOMDAT() && VTT->isWeakForLinker()) VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName())); - - // Set the right visibility. - CGM.setGVProperties(VTT, RD); } llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) { @@ -122,6 +119,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) { llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable( Name, ArrayType, llvm::GlobalValue::ExternalLinkage, Align); GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); + CGM.setGVProperties(GV, RD); return GV; } diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 2745b0c898ff7db36774b16b3d493affc29e56ee..4255f1ca9759c03f049ab63b5ca64dd0ea885674 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -596,15 +596,17 @@ CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F, "decoded_addr"); } -void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD, - llvm::Function *Fn) -{ - if (!FD->hasAttr()) +void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD, + llvm::Function *Fn) { + if (!FD->hasAttr() && !FD->hasAttr()) return; llvm::LLVMContext &Context = getLLVMContext(); - CGM.GenOpenCLArgMetadata(Fn, FD, this); + CGM.GenKernelArgMetadata(Fn, FD, this); + + if (!getLangOpts().OpenCL) + return; if (const VecTypeHintAttr *A = FD->getAttr()) { QualType HintQTy = A->getTypeHint(); @@ -919,9 +921,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (D && D->hasAttr()) Fn->addFnAttr(llvm::Attribute::NoProfile); - if (FD && getLangOpts().OpenCL) { + if (FD && (getLangOpts().OpenCL || + (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) { // Add metadata for a kernel function. - EmitOpenCLKernelMetadata(FD, Fn); + EmitKernelMetadata(FD, Fn); } // If we are checking function types, emit a function type signature as diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index a7de87b552d28ad90177c479054af72526a3c890..daf26d54641f068c784b1d255ab59cdbcf4e56f9 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -1968,8 +1968,7 @@ private: /// Add OpenCL kernel arg metadata and the kernel attribute metadata to /// the function metadata. 
- void EmitOpenCLKernelMetadata(const FunctionDecl *FD, - llvm::Function *Fn); + void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn); public: CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false); diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index ff3480a9ac840566dd31f9df3dbe5976e3ad2d63..34d8b3a7d3609cfcf9b873814a7df8a673c3f8de 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -1697,7 +1697,7 @@ static unsigned ArgInfoAddressSpace(LangAS AS) { } } -void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn, +void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn, const FunctionDecl *FD, CodeGenFunction *CGF) { assert(((FD && CGF) || (!FD && !CGF)) && @@ -1729,6 +1729,11 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn, if (FD && CGF) for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { const ParmVarDecl *parm = FD->getParamDecl(i); + // Get argument name. + argNames.push_back(llvm::MDString::get(VMContext, parm->getName())); + + if (!getLangOpts().OpenCL) + continue; QualType ty = parm->getType(); std::string typeQuals; @@ -1747,9 +1752,6 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn, } else accessQuals.push_back(llvm::MDString::get(VMContext, "none")); - // Get argument name. - argNames.push_back(llvm::MDString::get(VMContext, parm->getName())); - auto getTypeSpelling = [&](QualType Ty) { auto typeName = Ty.getUnqualifiedType().getAsString(Policy); @@ -1822,17 +1824,20 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn, argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals)); } - Fn->setMetadata("kernel_arg_addr_space", - llvm::MDNode::get(VMContext, addressQuals)); - Fn->setMetadata("kernel_arg_access_qual", - llvm::MDNode::get(VMContext, accessQuals)); - Fn->setMetadata("kernel_arg_type", - llvm::MDNode::get(VMContext, argTypeNames)); - Fn->setMetadata("kernel_arg_base_type", - llvm::MDNode::get(VMContext, argBaseTypeNames)); - Fn->setMetadata("kernel_arg_type_qual", - llvm::MDNode::get(VMContext, argTypeQuals)); - if (getCodeGenOpts().EmitOpenCLArgMetadata) + if (getLangOpts().OpenCL) { + Fn->setMetadata("kernel_arg_addr_space", + llvm::MDNode::get(VMContext, addressQuals)); + Fn->setMetadata("kernel_arg_access_qual", + llvm::MDNode::get(VMContext, accessQuals)); + Fn->setMetadata("kernel_arg_type", + llvm::MDNode::get(VMContext, argTypeNames)); + Fn->setMetadata("kernel_arg_base_type", + llvm::MDNode::get(VMContext, argBaseTypeNames)); + Fn->setMetadata("kernel_arg_type_qual", + llvm::MDNode::get(VMContext, argTypeQuals)); + } + if (getCodeGenOpts().EmitOpenCLArgMetadata || + getCodeGenOpts().HIPSaveKernelArgName) Fn->setMetadata("kernel_arg_name", llvm::MDNode::get(VMContext, argNames)); } @@ -2821,12 +2826,12 @@ bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn, CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr(); // First, check the function name. Optional V = ProfileList.isFunctionExcluded(Fn->getName(), Kind); - if (V.hasValue()) + if (V) return *V; // Next, check the source location. if (Loc.isValid()) { Optional V = ProfileList.isLocationExcluded(Loc, Kind); - if (V.hasValue()) + if (V) return *V; } // If location is unknown, this may be a compiler-generated function. 
Assume @@ -2834,7 +2839,7 @@ bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn, auto &SM = Context.getSourceManager(); if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) { Optional V = ProfileList.isFileExcluded(MainFile->getName(), Kind); - if (V.hasValue()) + if (V) return *V; } return ProfileList.getDefault(); diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h index 79e9a462a3d727ff47fdfad578d5adf40a5a889f..f5cbdafc8db62c200977a3068bc03415d5c49355 100644 --- a/clang/lib/CodeGen/CodeGenModule.h +++ b/clang/lib/CodeGen/CodeGenModule.h @@ -1460,7 +1460,7 @@ public: /// \param FN is a pointer to IR function being generated. /// \param FD is a pointer to function declaration if any. /// \param CGF is a pointer to CodeGenFunction that generates this function. - void GenOpenCLArgMetadata(llvm::Function *FN, + void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD = nullptr, CodeGenFunction *CGF = nullptr); diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 67bcb418a52eda10ebebeda30e087024feeae6c5..c34da3a67a2e0d6580dac0c41df72b6cd3d6461d 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -3325,8 +3325,8 @@ class OffloadingActionBuilder final { A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A, AssociatedOffloadKind); - if (CompileDeviceOnly && CurPhase == FinalPhase && - BundleOutput.hasValue() && BundleOutput.getValue()) { + if (CompileDeviceOnly && CurPhase == FinalPhase && BundleOutput && + BundleOutput.getValue()) { for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) { OffloadAction::DeviceDependences DDep; DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I], @@ -4459,17 +4459,6 @@ Action *Driver::BuildOffloadingActions(Compilation &C, OffloadAction::DeviceDependences DDep; DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind); A = C.MakeAction(HDep, DDep); - } else if (isa(A) && Kind == Action::OFK_Cuda) { - // The Cuda toolchain uses fatbinary as the linker phase to bundle the - // PTX and Cubin output. 
- ActionList FatbinActions; - for (Action *A : {A, A->getInputs()[0]}) { - OffloadAction::DeviceDependences DDep; - DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind); - FatbinActions.emplace_back( - C.MakeAction(DDep, A->getType())); - } - A = C.MakeAction(FatbinActions, types::TY_CUDA_FATBIN); } ++TCAndArch; } diff --git a/clang/lib/Driver/ToolChains/AVR.cpp b/clang/lib/Driver/ToolChains/AVR.cpp index 2547d1312322e57fa5579168fcdf8ca0f107b77f..1e866553d82686287cc7f8f888518fa7ad153837 100644 --- a/clang/lib/Driver/ToolChains/AVR.cpp +++ b/clang/lib/Driver/ToolChains/AVR.cpp @@ -475,7 +475,7 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA, D.Diag(diag::warn_drv_avr_stdlib_not_linked); } - if (SectionAddressData.hasValue()) { + if (SectionAddressData) { std::string DataSectionArg = std::string("-Tdata=0x") + llvm::utohexstr(SectionAddressData.getValue()); CmdArgs.push_back(Args.MakeArgString(DataSectionArg)); diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index b9ab49e09c16eda2bdb3c2f0ccb2f385e921d825..b9aa7139c9ceee3a40f2b0312b1858bb69057053 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -3511,9 +3511,12 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs, static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs, types::ID InputType) { - const unsigned ForwardedArguments[] = { - options::OPT_dxil_validator_version, options::OPT_D, options::OPT_S, - options::OPT_emit_llvm, options::OPT_disable_llvm_passes}; + const unsigned ForwardedArguments[] = {options::OPT_dxil_validator_version, + options::OPT_D, + options::OPT_S, + options::OPT_emit_llvm, + options::OPT_disable_llvm_passes, + options::OPT_fnative_half_type}; for (const auto &Arg : ForwardedArguments) if (const auto *A = Args.getLastArg(Arg)) @@ -3521,6 +3524,7 @@ static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs, // Add the default headers if dxc_no_stdinc is not set. if (!Args.hasArg(options::OPT_dxc_no_stdinc)) CmdArgs.push_back("-finclude-default-header"); + CmdArgs.push_back("-fallow-half-arguments-and-returns"); } static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args, @@ -6258,6 +6262,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_funroll_loops, options::OPT_fno_unroll_loops); + Args.AddLastArg(CmdArgs, options::OPT_fstrict_flex_arrays_EQ); + Args.AddLastArg(CmdArgs, options::OPT_pthread); if (Args.hasFlag(options::OPT_mspeculative_load_hardening, @@ -6317,6 +6323,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasFlag(options::OPT_fgpu_allow_device_init, options::OPT_fno_gpu_allow_device_init, false)) CmdArgs.push_back("-fgpu-allow-device-init"); + Args.addOptInFlag(CmdArgs, options::OPT_fhip_kernel_arg_name, + options::OPT_fno_hip_kernel_arg_name); } if (IsCuda || IsHIP) { @@ -8344,11 +8352,27 @@ void OffloadPackager::ConstructJob(Compilation &C, const JobAction &JA, StringRef Arch = (OffloadAction->getOffloadingArch()) ? 
OffloadAction->getOffloadingArch() : TCArgs.getLastArgValue(options::OPT_march_EQ); + StringRef Kind = + Action::GetOffloadKindName(OffloadAction->getOffloadingDeviceKind()); + + ArgStringList Features; + SmallVector FeatureArgs; + getTargetFeatures(TC->getDriver(), TC->getTriple(), Args, Features, false); + llvm::copy_if(Features, std::back_inserter(FeatureArgs), + [](StringRef Arg) { return !Arg.startswith("-target"); }); + + SmallVector Parts{ + "file=" + File.str(), + "triple=" + TC->getTripleString(), + "arch=" + Arch.str(), + "kind=" + Kind.str(), + }; - CmdArgs.push_back(Args.MakeArgString( - "--image=file=" + File + "," + "triple=" + TC->getTripleString() + "," + - "arch=" + Arch + "," + "kind=" + - Action::GetOffloadKindName(OffloadAction->getOffloadingDeviceKind()))); + if (TC->getDriver().isUsingLTO(/* IsOffload */ true)) + for (StringRef Feature : FeatureArgs) + Parts.emplace_back("feature=" + Feature.str()); + + CmdArgs.push_back(Args.MakeArgString("--image=" + llvm::join(Parts, ","))); } C.addCommand(std::make_unique( @@ -8406,20 +8430,6 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, } if (D.isUsingLTO(/* IsOffload */ true)) { - // Pass in target features for each toolchain. - for (auto &I : - llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) { - const ToolChain *TC = I.second; - const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP); - ArgStringList FeatureArgs; - TC->addClangTargetOptions(TCArgs, FeatureArgs, Action::OFK_OpenMP); - auto FeatureIt = llvm::find(FeatureArgs, "-target-feature"); - if (FeatureIt != FeatureArgs.end()) - CmdArgs.push_back( - Args.MakeArgString("-target-feature=" + TC->getTripleString() + - "=" + *(FeatureIt + 1))); - } - // Pass in the optimization level to use for LTO. if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) { StringRef OOpt; diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp index dcdcc9992e64520c1ac50686a8e45bfe90323ba5..0e87540d1615d34d694b5ef02fdeed42c0170083 100644 --- a/clang/lib/Driver/ToolChains/Cuda.cpp +++ b/clang/lib/Driver/ToolChains/Cuda.cpp @@ -536,8 +536,9 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA, const char *Arch = (II.getType() == types::TY_PP_Asm) ? CudaArchToVirtualArchString(gpu_arch) : gpu_arch_str; - CmdArgs.push_back(Args.MakeArgString(llvm::Twine("--image=profile=") + - Arch + ",file=" + II.getFilename())); + CmdArgs.push_back( + Args.MakeArgString(llvm::Twine("--image=profile=") + Arch + + ",file=" + getToolChain().getInputFilename(II))); } for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary)) @@ -695,9 +696,8 @@ CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple, std::string CudaToolChain::getInputFilename(const InputInfo &Input) const { // Only object files are changed, for example assembly files keep their .s - // extensions. CUDA also continues to use .o as they don't use nvlink but - // fatbinary. - if (!(OK == Action::OFK_OpenMP && Input.getType() == types::TY_Object)) + // extensions. 
+ if (Input.getType() != types::TY_Object) return ToolChain::getInputFilename(Input); // Replace extension for object files with cubin because nvlink relies on diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index aeab9d5fc417ff2a54b14c5c81b40e9f8041c29c..3368f6785718072f14aa6b8c3b03ac323437a607 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -135,6 +135,16 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA, A->render(Args, CmdArgs); } + // Optimization level for CodeGen. + if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) { + if (A->getOption().matches(options::OPT_O4)) { + CmdArgs.push_back("-O3"); + D.Diag(diag::warn_O4_is_O3); + } else { + A->render(Args, CmdArgs); + } + } + if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 4f05ae9cbff41d19ba0da8b9054ae2d2109ac6dd..bdaaadc91b5becf3a2dbcade6cf4570124ace4c7 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2086,7 +2086,7 @@ void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const { } bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const { - if (BiarchSibling.hasValue()) { + if (BiarchSibling) { M = BiarchSibling.getValue(); return true; } diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp index 023780817ef4b4696529e37375dce1daefd925d7..584e00bb7f058e3af8dc27cbcf996e6ef82cbf60 100644 --- a/clang/lib/Driver/ToolChains/HLSL.cpp +++ b/clang/lib/Driver/ToolChains/HLSL.cpp @@ -177,5 +177,7 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch, Opts.getOption(options::OPT_dxil_validator_version), DefaultValidatorVer); } + // FIXME: add validation for enable_16bit_types should be after HLSL 2018 and + // shader model 6.2. 
return DAL; } diff --git a/clang/lib/Driver/ToolChains/Minix.cpp b/clang/lib/Driver/ToolChains/Minix.cpp index 5bceb9aba3e964b3f0355e170c3a19558ef07e35..4b8670a79012ee7eaed44834af30a755f8e67254 100644 --- a/clang/lib/Driver/ToolChains/Minix.cpp +++ b/clang/lib/Driver/ToolChains/Minix.cpp @@ -56,7 +56,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA, assert(Output.isNothing() && "Invalid output."); } - if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) { + if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles, + options::OPT_r)) { CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o"))); CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); CmdArgs.push_back( @@ -71,7 +72,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA, getToolChain().addProfileRTLibs(Args, CmdArgs); - if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) { + if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs, + options::OPT_r)) { if (D.CCCIsCXX()) { if (getToolChain().ShouldLinkCXXStdlib(Args)) getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); @@ -79,7 +81,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA, } } - if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) { + if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles, + options::OPT_r)) { if (Args.hasArg(options::OPT_pthread)) CmdArgs.push_back("-lpthread"); CmdArgs.push_back("-lc"); diff --git a/clang/lib/Driver/ToolChains/OpenBSD.cpp b/clang/lib/Driver/ToolChains/OpenBSD.cpp index 62c430b66e5f72b1a717fd027a9ab57adb3950c0..54cf3cc89caf7df5fea5d20313cbbe6f05b053f8 100644 --- a/clang/lib/Driver/ToolChains/OpenBSD.cpp +++ b/clang/lib/Driver/ToolChains/OpenBSD.cpp @@ -17,6 +17,7 @@ #include "clang/Driver/SanitizerArgs.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/Path.h" +#include "llvm/Support/VirtualFileSystem.h" using namespace clang::driver; using namespace clang::driver::tools; @@ -334,12 +335,21 @@ void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args, CmdArgs.push_back(Profiling ? "-lpthread_p" : "-lpthread"); } -std::string OpenBSD::getCompilerRT(const ArgList &Args, - StringRef Component, +std::string OpenBSD::getCompilerRT(const ArgList &Args, StringRef Component, FileType Type) const { - SmallString<128> Path(getDriver().SysRoot); - llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a"); - return std::string(Path.str()); + if (Component == "builtins") { + SmallString<128> Path(getDriver().SysRoot); + llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a"); + return std::string(Path.str()); + } + SmallString<128> P(getDriver().ResourceDir); + std::string CRTBasename = + buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/false); + llvm::sys::path::append(P, "lib", CRTBasename); + // Checks if this is the base system case which uses a different location. 
+ if (getVFS().exists(P)) + return std::string(P.str()); + return ToolChain::getCompilerRT(Args, Component, Type); } Tool *OpenBSD::buildAssembler() const { diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp index 589bf8d216ed3950d6d344c1b8f00515278253b3..1ca041f3ed6da1f628021b806176f133c3a50c64 100644 --- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp +++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp @@ -725,11 +725,11 @@ static bool getLiteralInfo(SourceRange literalRange, break; } - if (!UpperU.hasValue() && !UpperL.hasValue()) + if (!UpperU && !UpperL) UpperU = UpperL = true; - else if (UpperU.hasValue() && !UpperL.hasValue()) + else if (UpperU && !UpperL) UpperL = UpperU; - else if (UpperL.hasValue() && !UpperU.hasValue()) + else if (UpperL && !UpperU) UpperU = UpperL; Info.U = *UpperU ? "U" : "u"; diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index df1aa9da16e3966986eca29fa7025f44cade2004..51526dc2a6817d1481b7481bae905f98e8f1ea20 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -3471,6 +3471,12 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) { return FormatStyle::LK_CSharp; if (FileName.endswith_insensitive(".json")) return FormatStyle::LK_Json; + if (FileName.endswith_insensitive(".sv") || + FileName.endswith_insensitive(".svh") || + FileName.endswith_insensitive(".v") || + FileName.endswith_insensitive(".vh")) { + return FormatStyle::LK_Verilog; + } return FormatStyle::LK_Cpp; } diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h index 71acdf2f094babc6660cdb7059f77900c559746b..b6cc021affae38aad2fa359c92f5db6736068141 100644 --- a/clang/lib/Format/FormatToken.h +++ b/clang/lib/Format/FormatToken.h @@ -46,6 +46,13 @@ namespace format { /* l_brace of if/for/while */ \ TYPE(ControlStatementLBrace) \ TYPE(CppCastLParen) \ + TYPE(CSharpGenericTypeConstraint) \ + TYPE(CSharpGenericTypeConstraintColon) \ + TYPE(CSharpGenericTypeConstraintComma) \ + TYPE(CSharpNamedArgumentColon) \ + TYPE(CSharpNullable) \ + TYPE(CSharpNullConditionalLSquare) \ + TYPE(CSharpStringLiteral) \ TYPE(CtorInitializerColon) \ TYPE(CtorInitializerComma) \ TYPE(DesignatedInitializerLSquare) \ @@ -68,6 +75,7 @@ namespace format { TYPE(InlineASMColon) \ TYPE(InlineASMSymbolicNameLSquare) \ TYPE(JavaAnnotation) \ + TYPE(JsAndAndEqual) \ TYPE(JsComputedPropertyName) \ TYPE(JsExponentiation) \ TYPE(JsExponentiationEqual) \ @@ -76,7 +84,6 @@ namespace format { TYPE(JsTypeColon) \ TYPE(JsTypeOperator) \ TYPE(JsTypeOptionalQuestion) \ - TYPE(JsAndAndEqual) \ TYPE(LambdaArrow) \ TYPE(LambdaLBrace) \ TYPE(LambdaLSquare) \ @@ -101,6 +108,7 @@ namespace format { TYPE(OverloadedOperator) \ TYPE(OverloadedOperatorLParen) \ TYPE(PointerOrReference) \ + TYPE(ProtoExtensionLSquare) \ TYPE(PureVirtualSpecifier) \ TYPE(RangeBasedForLoopColon) \ TYPE(RecordLBrace) \ @@ -119,7 +127,6 @@ namespace format { TYPE(TemplateCloser) \ TYPE(TemplateOpener) \ TYPE(TemplateString) \ - TYPE(ProtoExtensionLSquare) \ TYPE(TrailingAnnotation) \ TYPE(TrailingReturnArrow) \ TYPE(TrailingUnaryOperator) \ @@ -128,13 +135,6 @@ namespace format { TYPE(UnaryOperator) \ TYPE(UnionLBrace) \ TYPE(UntouchableMacroFunc) \ - TYPE(CSharpStringLiteral) \ - TYPE(CSharpNamedArgumentColon) \ - TYPE(CSharpNullable) \ - TYPE(CSharpNullConditionalLSquare) \ - TYPE(CSharpGenericTypeConstraint) \ - TYPE(CSharpGenericTypeConstraintColon) \ - TYPE(CSharpGenericTypeConstraintComma) \ TYPE(Unknown) /// Determines the 
semantic type of a syntactic token, e.g. whether "<" is a @@ -979,6 +979,138 @@ struct AdditionalKeywords { kw_when = &IdentTable.get("when"); kw_where = &IdentTable.get("where"); + kw_always = &IdentTable.get("always"); + kw_always_comb = &IdentTable.get("always_comb"); + kw_always_ff = &IdentTable.get("always_ff"); + kw_always_latch = &IdentTable.get("always_latch"); + kw_assign = &IdentTable.get("assign"); + kw_assume = &IdentTable.get("assume"); + kw_automatic = &IdentTable.get("automatic"); + kw_before = &IdentTable.get("before"); + kw_begin = &IdentTable.get("begin"); + kw_begin_keywords = &IdentTable.get("begin_keywords"); + kw_bins = &IdentTable.get("bins"); + kw_binsof = &IdentTable.get("binsof"); + kw_casex = &IdentTable.get("casex"); + kw_casez = &IdentTable.get("casez"); + kw_celldefine = &IdentTable.get("celldefine"); + kw_checker = &IdentTable.get("checker"); + kw_clocking = &IdentTable.get("clocking"); + kw_constraint = &IdentTable.get("constraint"); + kw_cover = &IdentTable.get("cover"); + kw_covergroup = &IdentTable.get("covergroup"); + kw_coverpoint = &IdentTable.get("coverpoint"); + kw_default_decay_time = &IdentTable.get("default_decay_time"); + kw_default_nettype = &IdentTable.get("default_nettype"); + kw_default_trireg_strength = &IdentTable.get("default_trireg_strength"); + kw_delay_mode_distributed = &IdentTable.get("delay_mode_distributed"); + kw_delay_mode_path = &IdentTable.get("delay_mode_path"); + kw_delay_mode_unit = &IdentTable.get("delay_mode_unit"); + kw_delay_mode_zero = &IdentTable.get("delay_mode_zero"); + kw_disable = &IdentTable.get("disable"); + kw_dist = &IdentTable.get("dist"); + kw_elsif = &IdentTable.get("elsif"); + kw_end = &IdentTable.get("end"); + kw_end_keywords = &IdentTable.get("end_keywords"); + kw_endcase = &IdentTable.get("endcase"); + kw_endcelldefine = &IdentTable.get("endcelldefine"); + kw_endchecker = &IdentTable.get("endchecker"); + kw_endclass = &IdentTable.get("endclass"); + kw_endclocking = &IdentTable.get("endclocking"); + kw_endfunction = &IdentTable.get("endfunction"); + kw_endgenerate = &IdentTable.get("endgenerate"); + kw_endgroup = &IdentTable.get("endgroup"); + kw_endinterface = &IdentTable.get("endinterface"); + kw_endmodule = &IdentTable.get("endmodule"); + kw_endpackage = &IdentTable.get("endpackage"); + kw_endprimitive = &IdentTable.get("endprimitive"); + kw_endprogram = &IdentTable.get("endprogram"); + kw_endproperty = &IdentTable.get("endproperty"); + kw_endsequence = &IdentTable.get("endsequence"); + kw_endspecify = &IdentTable.get("endspecify"); + kw_endtable = &IdentTable.get("endtable"); + kw_endtask = &IdentTable.get("endtask"); + kw_forever = &IdentTable.get("forever"); + kw_fork = &IdentTable.get("fork"); + kw_generate = &IdentTable.get("generate"); + kw_highz0 = &IdentTable.get("highz0"); + kw_highz1 = &IdentTable.get("highz1"); + kw_iff = &IdentTable.get("iff"); + kw_ifnone = &IdentTable.get("ifnone"); + kw_ignore_bins = &IdentTable.get("ignore_bins"); + kw_illegal_bins = &IdentTable.get("illegal_bins"); + kw_initial = &IdentTable.get("initial"); + kw_inout = &IdentTable.get("inout"); + kw_input = &IdentTable.get("input"); + kw_inside = &IdentTable.get("inside"); + kw_interconnect = &IdentTable.get("interconnect"); + kw_intersect = &IdentTable.get("intersect"); + kw_join = &IdentTable.get("join"); + kw_join_any = &IdentTable.get("join_any"); + kw_join_none = &IdentTable.get("join_none"); + kw_large = &IdentTable.get("large"); + kw_local = &IdentTable.get("local"); + kw_localparam = 
&IdentTable.get("localparam"); + kw_macromodule = &IdentTable.get("macromodule"); + kw_matches = &IdentTable.get("matches"); + kw_medium = &IdentTable.get("medium"); + kw_nounconnected_drive = &IdentTable.get("nounconnected_drive"); + kw_output = &IdentTable.get("output"); + kw_packed = &IdentTable.get("packed"); + kw_parameter = &IdentTable.get("parameter"); + kw_primitive = &IdentTable.get("primitive"); + kw_priority = &IdentTable.get("priority"); + kw_program = &IdentTable.get("program"); + kw_property = &IdentTable.get("property"); + kw_pull0 = &IdentTable.get("pull0"); + kw_pull1 = &IdentTable.get("pull1"); + kw_pure = &IdentTable.get("pure"); + kw_rand = &IdentTable.get("rand"); + kw_randc = &IdentTable.get("randc"); + kw_randcase = &IdentTable.get("randcase"); + kw_randsequence = &IdentTable.get("randsequence"); + kw_repeat = &IdentTable.get("repeat"); + kw_resetall = &IdentTable.get("resetall"); + kw_sample = &IdentTable.get("sample"); + kw_scalared = &IdentTable.get("scalared"); + kw_sequence = &IdentTable.get("sequence"); + kw_small = &IdentTable.get("small"); + kw_soft = &IdentTable.get("soft"); + kw_solve = &IdentTable.get("solve"); + kw_specify = &IdentTable.get("specify"); + kw_specparam = &IdentTable.get("specparam"); + kw_strong0 = &IdentTable.get("strong0"); + kw_strong1 = &IdentTable.get("strong1"); + kw_supply0 = &IdentTable.get("supply0"); + kw_supply1 = &IdentTable.get("supply1"); + kw_table = &IdentTable.get("table"); + kw_tagged = &IdentTable.get("tagged"); + kw_task = &IdentTable.get("task"); + kw_timescale = &IdentTable.get("timescale"); + kw_tri = &IdentTable.get("tri"); + kw_tri0 = &IdentTable.get("tri0"); + kw_tri1 = &IdentTable.get("tri1"); + kw_triand = &IdentTable.get("triand"); + kw_trior = &IdentTable.get("trior"); + kw_trireg = &IdentTable.get("trireg"); + kw_unconnected_drive = &IdentTable.get("unconnected_drive"); + kw_undefineall = &IdentTable.get("undefineall"); + kw_unique = &IdentTable.get("unique"); + kw_unique0 = &IdentTable.get("unique0"); + kw_uwire = &IdentTable.get("uwire"); + kw_vectored = &IdentTable.get("vectored"); + kw_wand = &IdentTable.get("wand"); + kw_weak0 = &IdentTable.get("weak0"); + kw_weak1 = &IdentTable.get("weak1"); + kw_wildcard = &IdentTable.get("wildcard"); + kw_wire = &IdentTable.get("wire"); + kw_with = &IdentTable.get("with"); + kw_wor = &IdentTable.get("wor"); + + // Symbols that are treated as keywords. + kw_verilogHash = &IdentTable.get("#"); + kw_verilogHashHash = &IdentTable.get("##"); + // Keep this at the end of the constructor to make sure everything here // is // already initialized. @@ -1002,6 +1134,136 @@ struct AdditionalKeywords { kw_set, kw_type, kw_typeof, kw_var, kw_yield, // Keywords from the Java section. kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface}); + + // Some keywords are not included here because they don't need special + // treatment like `showcancelled` or they should be treated as identifiers + // like `int` and `logic`. 
+ VerilogExtraKeywords = + std::unordered_set({kw_always, + kw_always_comb, + kw_always_ff, + kw_always_latch, + kw_assert, + kw_assign, + kw_assume, + kw_automatic, + kw_before, + kw_begin, + kw_bins, + kw_binsof, + kw_casex, + kw_casez, + kw_celldefine, + kw_checker, + kw_clocking, + kw_constraint, + kw_cover, + kw_covergroup, + kw_coverpoint, + kw_disable, + kw_dist, + kw_end, + kw_endcase, + kw_endchecker, + kw_endclass, + kw_endclocking, + kw_endfunction, + kw_endgenerate, + kw_endgroup, + kw_endinterface, + kw_endmodule, + kw_endpackage, + kw_endprimitive, + kw_endprogram, + kw_endproperty, + kw_endsequence, + kw_endspecify, + kw_endtable, + kw_endtask, + kw_extends, + kw_final, + kw_foreach, + kw_forever, + kw_fork, + kw_function, + kw_generate, + kw_highz0, + kw_highz1, + kw_iff, + kw_ifnone, + kw_ignore_bins, + kw_illegal_bins, + kw_implements, + kw_import, + kw_initial, + kw_inout, + kw_input, + kw_inside, + kw_interconnect, + kw_interface, + kw_intersect, + kw_join, + kw_join_any, + kw_join_none, + kw_large, + kw_let, + kw_local, + kw_localparam, + kw_macromodule, + kw_matches, + kw_medium, + kw_output, + kw_package, + kw_packed, + kw_parameter, + kw_primitive, + kw_priority, + kw_program, + kw_property, + kw_pull0, + kw_pull1, + kw_pure, + kw_rand, + kw_randc, + kw_randcase, + kw_randsequence, + kw_ref, + kw_repeat, + kw_sample, + kw_scalared, + kw_sequence, + kw_small, + kw_soft, + kw_solve, + kw_specify, + kw_specparam, + kw_strong0, + kw_strong1, + kw_supply0, + kw_supply1, + kw_table, + kw_tagged, + kw_task, + kw_tri, + kw_tri0, + kw_tri1, + kw_triand, + kw_trior, + kw_trireg, + kw_unique, + kw_unique0, + kw_uwire, + kw_var, + kw_vectored, + kw_wand, + kw_weak0, + kw_weak1, + kw_wildcard, + kw_wire, + kw_with, + kw_wor, + kw_verilogHash, + kw_verilogHashHash}); } // Context sensitive keywords. 
@@ -1107,6 +1369,146 @@ struct AdditionalKeywords { IdentifierInfo *kw_when; IdentifierInfo *kw_where; + // Verilog keywords + IdentifierInfo *kw_always; + IdentifierInfo *kw_always_comb; + IdentifierInfo *kw_always_ff; + IdentifierInfo *kw_always_latch; + IdentifierInfo *kw_assign; + IdentifierInfo *kw_assume; + IdentifierInfo *kw_automatic; + IdentifierInfo *kw_before; + IdentifierInfo *kw_begin; + IdentifierInfo *kw_begin_keywords; + IdentifierInfo *kw_bins; + IdentifierInfo *kw_binsof; + IdentifierInfo *kw_casex; + IdentifierInfo *kw_casez; + IdentifierInfo *kw_celldefine; + IdentifierInfo *kw_checker; + IdentifierInfo *kw_clocking; + IdentifierInfo *kw_constraint; + IdentifierInfo *kw_cover; + IdentifierInfo *kw_covergroup; + IdentifierInfo *kw_coverpoint; + IdentifierInfo *kw_default_decay_time; + IdentifierInfo *kw_default_nettype; + IdentifierInfo *kw_default_trireg_strength; + IdentifierInfo *kw_delay_mode_distributed; + IdentifierInfo *kw_delay_mode_path; + IdentifierInfo *kw_delay_mode_unit; + IdentifierInfo *kw_delay_mode_zero; + IdentifierInfo *kw_disable; + IdentifierInfo *kw_dist; + IdentifierInfo *kw_elsif; + IdentifierInfo *kw_end; + IdentifierInfo *kw_end_keywords; + IdentifierInfo *kw_endcase; + IdentifierInfo *kw_endcelldefine; + IdentifierInfo *kw_endchecker; + IdentifierInfo *kw_endclass; + IdentifierInfo *kw_endclocking; + IdentifierInfo *kw_endfunction; + IdentifierInfo *kw_endgenerate; + IdentifierInfo *kw_endgroup; + IdentifierInfo *kw_endinterface; + IdentifierInfo *kw_endmodule; + IdentifierInfo *kw_endpackage; + IdentifierInfo *kw_endprimitive; + IdentifierInfo *kw_endprogram; + IdentifierInfo *kw_endproperty; + IdentifierInfo *kw_endsequence; + IdentifierInfo *kw_endspecify; + IdentifierInfo *kw_endtable; + IdentifierInfo *kw_endtask; + IdentifierInfo *kw_forever; + IdentifierInfo *kw_fork; + IdentifierInfo *kw_generate; + IdentifierInfo *kw_highz0; + IdentifierInfo *kw_highz1; + IdentifierInfo *kw_iff; + IdentifierInfo *kw_ifnone; + IdentifierInfo *kw_ignore_bins; + IdentifierInfo *kw_illegal_bins; + IdentifierInfo *kw_initial; + IdentifierInfo *kw_inout; + IdentifierInfo *kw_input; + IdentifierInfo *kw_inside; + IdentifierInfo *kw_interconnect; + IdentifierInfo *kw_intersect; + IdentifierInfo *kw_join; + IdentifierInfo *kw_join_any; + IdentifierInfo *kw_join_none; + IdentifierInfo *kw_large; + IdentifierInfo *kw_local; + IdentifierInfo *kw_localparam; + IdentifierInfo *kw_macromodule; + IdentifierInfo *kw_matches; + IdentifierInfo *kw_medium; + IdentifierInfo *kw_nounconnected_drive; + IdentifierInfo *kw_output; + IdentifierInfo *kw_packed; + IdentifierInfo *kw_parameter; + IdentifierInfo *kw_primitive; + IdentifierInfo *kw_priority; + IdentifierInfo *kw_program; + IdentifierInfo *kw_property; + IdentifierInfo *kw_pull0; + IdentifierInfo *kw_pull1; + IdentifierInfo *kw_pure; + IdentifierInfo *kw_rand; + IdentifierInfo *kw_randc; + IdentifierInfo *kw_randcase; + IdentifierInfo *kw_randsequence; + IdentifierInfo *kw_repeat; + IdentifierInfo *kw_resetall; + IdentifierInfo *kw_sample; + IdentifierInfo *kw_scalared; + IdentifierInfo *kw_sequence; + IdentifierInfo *kw_small; + IdentifierInfo *kw_soft; + IdentifierInfo *kw_solve; + IdentifierInfo *kw_specify; + IdentifierInfo *kw_specparam; + IdentifierInfo *kw_strong0; + IdentifierInfo *kw_strong1; + IdentifierInfo *kw_supply0; + IdentifierInfo *kw_supply1; + IdentifierInfo *kw_table; + IdentifierInfo *kw_tagged; + IdentifierInfo *kw_task; + IdentifierInfo *kw_timescale; + IdentifierInfo *kw_tri0; + IdentifierInfo 
*kw_tri1; + IdentifierInfo *kw_tri; + IdentifierInfo *kw_triand; + IdentifierInfo *kw_trior; + IdentifierInfo *kw_trireg; + IdentifierInfo *kw_unconnected_drive; + IdentifierInfo *kw_undefineall; + IdentifierInfo *kw_unique; + IdentifierInfo *kw_unique0; + IdentifierInfo *kw_uwire; + IdentifierInfo *kw_vectored; + IdentifierInfo *kw_wand; + IdentifierInfo *kw_weak0; + IdentifierInfo *kw_weak1; + IdentifierInfo *kw_wildcard; + IdentifierInfo *kw_wire; + IdentifierInfo *kw_with; + IdentifierInfo *kw_wor; + + // Workaround for hashes and backticks in Verilog. + IdentifierInfo *kw_verilogHash; + IdentifierInfo *kw_verilogHashHash; + + /// Returns \c true if \p Tok is a keyword or an identifier. + bool isWordLike(const FormatToken &Tok) const { + // getIdentifierInfo returns non-null for keywords as well as identifiers. + return Tok.Tok.getIdentifierInfo() != nullptr && + !Tok.isOneOf(kw_verilogHash, kw_verilogHashHash); + } + /// Returns \c true if \p Tok is a true JavaScript identifier, returns /// \c false if it is a keyword or a pseudo keyword. /// If \c AcceptIdentifierName is true, returns true not only for keywords, @@ -1233,12 +1635,101 @@ struct AdditionalKeywords { } } + bool isVerilogIdentifier(const FormatToken &Tok) const { + switch (Tok.Tok.getKind()) { + case tok::kw_case: + case tok::kw_class: + case tok::kw_const: + case tok::kw_continue: + case tok::kw_default: + case tok::kw_do: + case tok::kw_extern: + case tok::kw_else: + case tok::kw_enum: + case tok::kw_for: + case tok::kw_if: + case tok::kw_restrict: + case tok::kw_signed: + case tok::kw_static: + case tok::kw_struct: + case tok::kw_typedef: + case tok::kw_union: + case tok::kw_unsigned: + case tok::kw_virtual: + case tok::kw_while: + return false; + case tok::identifier: + return VerilogExtraKeywords.find(Tok.Tok.getIdentifierInfo()) == + VerilogExtraKeywords.end(); + default: + // getIdentifierInfo returns non-null for both identifiers and keywords. + return Tok.Tok.getIdentifierInfo() != nullptr; + } + } + + /// Returns whether \p Tok is a Verilog preprocessor directive. This is + /// needed because macro expansions start with a backtick as well and they + /// need to be treated differently. + bool isVerilogPPDirective(const FormatToken &Tok) const { + auto Info = Tok.Tok.getIdentifierInfo(); + if (!Info) + return false; + switch (Info->getPPKeywordID()) { + case tok::pp_define: + case tok::pp_else: + case tok::pp_endif: + case tok::pp_ifdef: + case tok::pp_ifndef: + case tok::pp_include: + case tok::pp_line: + case tok::pp_pragma: + case tok::pp_undef: + return true; + default: + return Tok.isOneOf(kw_begin_keywords, kw_celldefine, + kw_default_decay_time, kw_default_nettype, + kw_default_trireg_strength, kw_delay_mode_distributed, + kw_delay_mode_path, kw_delay_mode_unit, + kw_delay_mode_zero, kw_elsif, kw_end_keywords, + kw_endcelldefine, kw_nounconnected_drive, kw_resetall, + kw_timescale, kw_unconnected_drive, kw_undefineall); + } + } + + /// Returns whether \p Tok is a Verilog keyword that opens a block. + bool isVerilogBegin(const FormatToken &Tok) const { + // `table` is not included since it needs to be treated specially. + return !Tok.endsSequence(kw_fork, kw_disable) && + Tok.isOneOf(kw_begin, kw_fork, kw_generate, kw_specify); + } + + /// Returns whether \p Tok is a Verilog keyword that closes a block.
+ bool isVerilogEnd(const FormatToken &Tok) const { + return !Tok.endsSequence(kw_join, kw_rand) && + Tok.isOneOf(TT_MacroBlockEnd, kw_end, kw_endcase, kw_endclass, + kw_endclocking, kw_endchecker, kw_endfunction, + kw_endgenerate, kw_endgroup, kw_endinterface, + kw_endmodule, kw_endpackage, kw_endprimitive, + kw_endprogram, kw_endproperty, kw_endsequence, + kw_endspecify, kw_endtable, kw_endtask, kw_join, + kw_join_any, kw_join_none); + } + + /// Whether the token begins a block. + bool isBlockBegin(const FormatToken &Tok, const FormatStyle &Style) const { + return Tok.is(TT_MacroBlockBegin) || + (Style.isVerilog() ? isVerilogBegin(Tok) : Tok.is(tok::l_brace)); + } + private: /// The JavaScript keywords beyond the C++ keyword set. std::unordered_set<IdentifierInfo *> JsExtraKeywords; /// The C# keywords beyond the C++ keyword set. std::unordered_set<IdentifierInfo *> CSharpExtraKeywords; + + /// The Verilog keywords beyond the C++ keyword set. + std::unordered_set<IdentifierInfo *> VerilogExtraKeywords; }; } // namespace format diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp index 214a52780888b66f3b6594f89edbb897c679dae3..88b0d3b1970f7592aaaec990e2ffe2874a7eb467 100644 --- a/clang/lib/Format/FormatTokenLexer.cpp +++ b/clang/lib/Format/FormatTokenLexer.cpp @@ -840,6 +840,56 @@ FormatToken *FormatTokenLexer::getStashedToken() { return FormatTok; } +/// Truncate the current token to the new length and make the lexer continue +/// from the end of the truncated token. Used for other languages that have +/// different token boundaries, like JavaScript in which a comment ends at a +/// line break regardless of whether the line break follows a backslash. Also +/// used to set the lexer to the end of whitespace if the lexer regards +/// whitespace and an unrecognized symbol as one token. +void FormatTokenLexer::truncateToken(size_t NewLen) { + assert(NewLen <= FormatTok->TokenText.size()); + resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation( + Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen))); + FormatTok->TokenText = FormatTok->TokenText.substr(0, NewLen); + FormatTok->ColumnWidth = encoding::columnWidthWithTabs( + FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth, + Encoding); + FormatTok->Tok.setLength(NewLen); +} + +/// Count the length of leading whitespace in a token. +static size_t countLeadingWhitespace(StringRef Text) { + // Basically counting the length matched by this regex. + // "^([\n\r\f\v \t]|(\\\\|\\?\\?/)[\n\r])+" + // Directly using the regex turned out to be slow. With the regex + // version formatting all files in this directory took about 1.25 + // seconds. This version took about 0.5 seconds. + const char *Cur = Text.begin(); + while (Cur < Text.end()) { + if (isspace(Cur[0])) { + ++Cur; + } else if (Cur[0] == '\\' && (Cur[1] == '\n' || Cur[1] == '\r')) { + // A '\' followed by a newline always escapes the newline, regardless + // of whether there is another '\' before it. + // The source has a null byte at the end. So the end of the entire input + // isn't reached yet. Also the lexer doesn't break apart an escaped + // newline. + assert(Text.end() - Cur >= 2); + Cur += 2; + } else if (Cur[0] == '?' && Cur[1] == '?' && Cur[2] == '/' && + (Cur[3] == '\n' || Cur[3] == '\r')) { + // Newlines can also be escaped by a '?' '?' '/' trigraph. By the way, the + // characters are quoted individually in this comment because if we write + // them together some compilers warn that we have a trigraph in the code.
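+ // Editorial illustration (not from the patch): in "??/\n\tx" the + // trigraph-escaped newline counts as whitespace, the tab after it is + // consumed as well, and only the 'x' starts the next token.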
+ assert(Text.end() - Cur >= 4); + Cur += 4; + } else { + break; + } + } + return Cur - Text.begin(); +} + FormatToken *FormatTokenLexer::getNextToken() { if (StateStack.top() == LexerState::TOKEN_STASHED) { StateStack.pop(); @@ -854,34 +904,33 @@ FormatToken *FormatTokenLexer::getNextToken() { IsFirstToken = false; // Consume and record whitespace until we find a significant token. + // Some tok::unknown tokens are not just whitespace, e.g. whitespace + // followed by a symbol such as backtick. Those symbols may be + // significant in other languages. unsigned WhitespaceLength = TrailingWhitespace; - while (FormatTok->is(tok::unknown)) { + while (FormatTok->isNot(tok::eof)) { + auto LeadingWhitespace = countLeadingWhitespace(FormatTok->TokenText); + if (LeadingWhitespace == 0) + break; + if (LeadingWhitespace < FormatTok->TokenText.size()) + truncateToken(LeadingWhitespace); StringRef Text = FormatTok->TokenText; - auto EscapesNewline = [&](int pos) { - // A '\r' here is just part of '\r\n'. Skip it. - if (pos >= 0 && Text[pos] == '\r') - --pos; - // See whether there is an odd number of '\' before this. - // FIXME: This is wrong. A '\' followed by a newline is always removed, - // regardless of whether there is another '\' before it. - // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph. - unsigned count = 0; - for (; pos >= 0; --pos, ++count) - if (Text[pos] != '\\') - break; - return count & 1; - }; - // FIXME: This miscounts tok:unknown tokens that are not just - // whitespace, e.g. a '`' character. + bool InEscape = false; for (int i = 0, e = Text.size(); i != e; ++i) { switch (Text[i]) { + case '\r': + // If this is a CRLF sequence, break here and the LF will be handled on + // the next loop iteration. Otherwise, this is a single Mac CR, treat it + // the same as a single LF. + if (i + 1 < e && Text[i + 1] == '\n') + break; + LLVM_FALLTHROUGH; case '\n': ++FormatTok->NewlinesBefore; - FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1); - FormatTok->LastNewlineOffset = WhitespaceLength + i + 1; - Column = 0; - break; - case '\r': + if (!InEscape) + FormatTok->HasUnescapedNewline = true; + else + InEscape = false; FormatTok->LastNewlineOffset = WhitespaceLength + i + 1; Column = 0; break; @@ -897,24 +946,32 @@ FormatToken *FormatTokenLexer::getNextToken() { Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0); break; case '\\': - if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n')) - FormatTok->setType(TT_ImplicitStringLiteral); + case '?': + case '/': + // The text was entirely whitespace when this loop was entered. Thus + // this has to be an escape sequence. + assert(Text.substr(i, 2) == "\\\r" || Text.substr(i, 2) == "\\\n" || + Text.substr(i, 4) == "\?\?/\r" || + Text.substr(i, 4) == "\?\?/\n" || + (i >= 1 && (Text.substr(i - 1, 4) == "\?\?/\r" || + Text.substr(i - 1, 4) == "\?\?/\n")) || + (i >= 2 && (Text.substr(i - 2, 4) == "\?\?/\r" || + Text.substr(i - 2, 4) == "\?\?/\n"))); + InEscape = true; break; default: - FormatTok->setType(TT_ImplicitStringLiteral); + // This shouldn't happen. + assert(false); break; } - if (FormatTok->getType() == TT_ImplicitStringLiteral) - break; } - - if (FormatTok->is(TT_ImplicitStringLiteral)) - break; - WhitespaceLength += FormatTok->Tok.getLength(); - + WhitespaceLength += Text.size(); readRawToken(*FormatTok); } + if (FormatTok->is(tok::unknown)) + FormatTok->setType(TT_ImplicitStringLiteral); + // JavaScript and Java do not allow to escape the end of the line with a // backslash. 
backslash. Backslashes are syntax errors in plain source, but can occur in // comments. When a single line comment ends with a \, it'll cause the next @@ -928,40 +985,30 @@ while (BackslashPos != StringRef::npos) { if (BackslashPos + 1 < FormatTok->TokenText.size() && FormatTok->TokenText[BackslashPos + 1] == '\n') { - const char *Offset = Lex->getBufferLocation(); - Offset -= FormatTok->TokenText.size(); - Offset += BackslashPos + 1; - resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset))); - FormatTok->TokenText = FormatTok->TokenText.substr(0, BackslashPos + 1); - FormatTok->ColumnWidth = encoding::columnWidthWithTabs( - FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth, - Encoding); + truncateToken(BackslashPos + 1); break; } BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1); } } - // In case the token starts with escaped newlines, we want to - // take them into account as whitespace - this pattern is quite frequent - // in macro definitions. - // FIXME: Add a more explicit test. - while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\') { - unsigned SkippedWhitespace = 0; - if (FormatTok->TokenText.size() > 2 && - (FormatTok->TokenText[1] == '\r' && FormatTok->TokenText[2] == '\n')) { - SkippedWhitespace = 3; - } else if (FormatTok->TokenText[1] == '\n') { - SkippedWhitespace = 2; - } else { - break; + if (Style.isVerilog()) { + // Verilog uses the backtick instead of the hash for preprocessor stuff. + // And it uses the hash for delays and parameter lists. In order to continue + // using `tok::hash` in other places, the backtick gets marked as the hash + // here. And in order to tell the backtick and hash apart for + // Verilog-specific stuff, the hash becomes an identifier. + if (FormatTok->isOneOf(tok::hash, tok::hashhash)) { + FormatTok->Tok.setKind(tok::raw_identifier); + } else if (FormatTok->is(tok::raw_identifier)) { + if (FormatTok->TokenText == "`") { + FormatTok->Tok.setIdentifierInfo(nullptr); + FormatTok->Tok.setKind(tok::hash); + } else if (FormatTok->TokenText == "``") { + FormatTok->Tok.setIdentifierInfo(nullptr); + FormatTok->Tok.setKind(tok::hashhash); + } } - - ++FormatTok->NewlinesBefore; - WhitespaceLength += SkippedWhitespace; - FormatTok->LastNewlineOffset = SkippedWhitespace; - Column = 0; - FormatTok->TokenText = FormatTok->TokenText.substr(SkippedWhitespace); } FormatTok->WhitespaceRange = SourceRange( @@ -1051,8 +1098,52 @@ FormatToken *FormatTokenLexer::getNextToken() { return FormatTok; } +bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) { + // In Verilog the quote is not a character literal. + // + // Make the backtick and double backtick identifiers so we can match against + // them more easily. + // + // In Verilog an escaped identifier starts with backslash and ends with + // whitespace, unless that whitespace is an escaped newline. A backslash can + // also begin an escaped newline outside of an escaped identifier. We check + // for that outside of the Regex since we can't use negative lookahead + // assertions. Simply changing the '*' to '+' breaks stuff as the escaped + // identifier may have a length of 0 according to Section A.9.3. + // FIXME: If there is an escaped newline in the middle of an escaped + // identifier, allow for pasting the two lines together, but escaped + // identifiers usually occur only in generated code anyway.
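+ // Editorial examples (hypothetical input, not from the patch): the regex + // below matches ' on its own, a lone ` or `` (re-tagged as hash or hashhash + // later in getNextToken), and an escaped identifier such as \bus.sel, which + // runs until unescaped whitespace.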
+ static const llvm::Regex VerilogToken(R"re(^('|``?|\\(\\)re" + "(\r?\n|\r)|[^[:space:]])*)"); + + SmallVector<StringRef> Matches; + const char *Start = Lex->getBufferLocation(); + if (!VerilogToken.match(StringRef(Start, Lex->getBuffer().end() - Start), + &Matches)) { + return false; + } + // There is a null byte at the end of the buffer, so we don't have to check + // Start[1] is within the buffer. + if (Start[0] == '\\' && (Start[1] == '\r' || Start[1] == '\n')) + return false; + size_t Len = Matches[0].size(); + + // The kind has to be an identifier so we can match it against those defined + // in Keywords. The kind has to be set before the length because the setLength + // function checks that the kind is not an annotation. + Tok.setKind(tok::raw_identifier); + Tok.setLength(Len); + Tok.setLocation(Lex->getSourceLocation(Start, Len)); + Tok.setRawIdentifierData(Start); + Lex->seek(Lex->getCurrentBufferOffset() + Len, /*IsAtStartOfLine=*/false); + return true; +} + void FormatTokenLexer::readRawToken(FormatToken &Tok) { - Lex->LexFromRawLexer(Tok.Tok); + // For Verilog, first see if there is a special token, and fall back to the + // normal lexer if there isn't one. + if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok)) + Lex->LexFromRawLexer(Tok.Tok); Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()), Tok.Tok.getLength()); // For formatting, treat unterminated string literals like normal string diff --git a/clang/lib/Format/FormatTokenLexer.h b/clang/lib/Format/FormatTokenLexer.h index 3e7bad7b71513796b61a99cc4342f1a62b3d04f9..bff2c181d81e3134523dd0246df11df0aedc6bf8 100644 --- a/clang/lib/Format/FormatTokenLexer.h +++ b/clang/lib/Format/FormatTokenLexer.h @@ -92,6 +92,8 @@ private: bool tryMergeConflictMarkers(); + void truncateToken(size_t NewLen); + FormatToken *getStashedToken(); FormatToken *getNextToken(); @@ -124,6 +126,9 @@ private: // Targets that may appear inside a C# attribute. static const llvm::StringSet<> CSharpAttributeTargets; + /// Handle Verilog-specific tokens. + bool readRawTokenVerilogSpecific(Token &Tok); + void readRawToken(FormatToken &Tok); void resetLexer(unsigned Offset); diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index dbd503d25862b0c6ffa97fd5bff1d53474a630c7..1ee95f26d1fc7b7663224e87f539a5eb376644f3 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -1343,6 +1343,10 @@ private: // sequence. if (!CurrentToken->Tok.getIdentifierInfo()) return Type; + // In Verilog, macro expansions start with a backtick just like preprocessor + // directives. Thus we stop if the word is not a preprocessor directive. + if (Style.isVerilog() && !Keywords.isVerilogPPDirective(*CurrentToken)) + return LT_Invalid; switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) { case tok::pp_include: case tok::pp_include_next: @@ -1385,8 +1389,14 @@ public: if (!CurrentToken) return LT_Invalid; NonTemplateLess.clear(); - if (CurrentToken->is(tok::hash)) - return parsePreprocessorDirective(); + if (CurrentToken->is(tok::hash)) { + // We were not yet allowed to use C++17 optional when this was being + // written. So we used LT_Invalid to mark that the line is not a + // preprocessor directive.
+ auto Type = parsePreprocessorDirective(); + if (Type != LT_Invalid) + return Type; + } // Directly allow to 'import ' to support protocol buffer // definitions (github.com/google/protobuf) or missing "#" (either way we @@ -3663,8 +3673,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, if (Left.Finalized) return Right.hasWhitespaceBefore(); - if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo()) - return true; // Never ever merge two identifiers. + // Never ever merge two words. + if (Keywords.isWordLike(Right) && Keywords.isWordLike(Left)) + return true; // Leave a space between * and /* to avoid C4138 `comment end` found outside // of comment. @@ -3931,6 +3942,21 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, Right.is(TT_TemplateOpener)) { return true; } + } else if (Style.isVerilog()) { + // Don't add space within a delay like `#0`. + if (!Left.is(TT_BinaryOperator) && + Left.isOneOf(Keywords.kw_verilogHash, Keywords.kw_verilogHashHash)) { + return false; + } + // Add space after a delay. + if (!Right.is(tok::semi) && + (Left.endsSequence(tok::numeric_constant, Keywords.kw_verilogHash) || + Left.endsSequence(tok::numeric_constant, + Keywords.kw_verilogHashHash) || + (Left.is(tok::r_paren) && Left.MatchingParen && + Left.MatchingParen->endsSequence(tok::l_paren, tok::at)))) { + return true; + } } if (Left.is(TT_ImplicitStringLiteral)) return Right.hasWhitespaceBefore(); diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp index b8a535b8d527f544b3bce3f1982bec226790ac1e..22509a50424659a30e0b1c340b177d14047255c7 100644 --- a/clang/lib/Format/UnwrappedLineFormatter.cpp +++ b/clang/lib/Format/UnwrappedLineFormatter.cpp @@ -1163,6 +1163,10 @@ private: // While not empty, take first element and follow edges. while (!Queue.empty()) { + // Quit if we still haven't found a solution by now. + if (Count > 25000000) + return 0; + Penalty = Queue.top().first.first; StateNode *Node = Queue.top().second; if (!Node->State.NextToken) { diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 77a59d8585091b20f2a64f07b72a7fab08ca1711..d3383292f7a38e9015036c129ab6c949a5e0ff62 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -829,7 +829,17 @@ FormatToken *UnwrappedLineParser::parseBlock( bool MustBeDeclaration, unsigned AddLevels, bool MunchSemi, bool KeepBraces, IfStmtKind *IfKind, bool UnindentWhitesmithsBraces, bool CanContainBracedList, TokenType NextLBracesType) { - assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) && + auto HandleVerilogBlockLabel = [this]() { + // ":" name + if (Style.isVerilog() && FormatTok->is(tok::colon)) { + nextToken(); + if (Keywords.isVerilogIdentifier(*FormatTok)) + nextToken(); + } + }; + + assert((FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) || + (Style.isVerilog() && Keywords.isVerilogBegin(*FormatTok))) && "'{' or macro block token expected"); FormatToken *Tok = FormatTok; const bool FollowedByComment = Tokens->peekNextToken()->is(tok::comment); @@ -846,6 +856,7 @@ FormatToken *UnwrappedLineParser::parseBlock( const unsigned InitialLevel = Line->Level; nextToken(/*LevelDifference=*/AddLevels); + HandleVerilogBlockLabel(); // Bail out if there are too many levels. Otherwise, the stack might overflow. if (Line->Level > 300) @@ -926,6 +937,7 @@ FormatToken *UnwrappedLineParser::parseBlock( // Munch the closing brace. 
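+ // Editorial example (hypothetical Verilog): for `end : stage1`, the call + // below lets HandleVerilogBlockLabel consume the colon and the label + // identifier so the label stays attached to the `end` keyword.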
nextToken(/*LevelDifference=*/-AddLevels); + HandleVerilogBlockLabel(); if (MacroBlock && FormatTok->is(tok::l_paren)) parseParens(); @@ -1897,9 +1909,18 @@ void UnwrappedLineParser::parseStructuralElement( if (Style.isJavaScript()) break; - TokenCount = Line->Tokens.size(); - if (TokenCount == 1 || - (TokenCount == 2 && Line->Tokens.front().Tok->is(tok::comment))) { + auto OneTokenSoFar = [&]() { + const UnwrappedLineNode *Tok = &Line->Tokens.front(), + *End = Tok + Line->Tokens.size(); + while (Tok != End && Tok->Tok->is(tok::comment)) + ++Tok; + // In Verilog, macro invocations start with a backtick which the code + // treats as a hash. Skip it. + if (Style.isVerilog() && Tok != End && Tok->Tok->is(tok::hash)) + ++Tok; + return End - Tok == 1; + }; + if (OneTokenSoFar()) { if (FormatTok->is(tok::colon) && !Line->MustBeDeclaration) { Line->Tokens.begin()->Tok->MustBreakBefore = true; parseLabel(!Style.IndentGotoLabels); @@ -2577,7 +2598,7 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind, FormatToken *IfLeftBrace = nullptr; IfStmtKind IfBlockKind = IfStmtKind::NotIf; - if (FormatTok->is(tok::l_brace)) { + if (Keywords.isBlockBegin(*FormatTok, Style)) { FormatTok->setFinalizedType(TT_ControlStatementLBrace); IfLeftBrace = FormatTok; CompoundStatementIndenter Indenter(this, Style, Line->Level); @@ -2610,7 +2631,7 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind, } nextToken(); handleAttributes(); - if (FormatTok->is(tok::l_brace)) { + if (Keywords.isBlockBegin(*FormatTok, Style)) { const bool FollowedByIf = Tokens->peekNextToken()->is(tok::kw_if); FormatTok->setFinalizedType(TT_ElseLBrace); ElseLeftBrace = FormatTok; @@ -2877,7 +2898,7 @@ void UnwrappedLineParser::parseNew() { void UnwrappedLineParser::parseLoopBody(bool KeepBraces, bool WrapRightBrace) { keepAncestorBraces(); - if (FormatTok->is(tok::l_brace)) { + if (Keywords.isBlockBegin(*FormatTok, Style)) { if (!KeepBraces) FormatTok->setFinalizedType(TT_ControlStatementLBrace); FormatToken *LeftBrace = FormatTok; @@ -3168,6 +3189,11 @@ bool clang::format::UnwrappedLineParser::parseRequires() { break; } default: + if (PreviousNonComment->isTypeOrIdentifier()) { + // This is a requires clause. + parseRequiresClause(RequiresToken); + return true; + } // It's an expression. parseRequiresExpression(RequiresToken); return false; @@ -4161,6 +4187,16 @@ void UnwrappedLineParser::nextToken(int LevelDifference) { else readTokenWithJavaScriptASI(); FormatTok->Previous = Previous; + if (Style.isVerilog()) { + // Blocks in Verilog can have `begin` and `end` instead of braces. For + // keywords like `begin`, we can't treat them the same as left braces + // because some contexts require one of them. For example structs use + // braces and if blocks use keywords, and a left brace can occur in an if + // statement, but it is not a block. For keywords like `end`, we simply + // treat them the same as right braces. 
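+ // Editorial example (hypothetical source): in `if (c) begin ... end`, + // `begin` keeps its own token kind so parsing code can require it + // explicitly, while `end` is simply re-tagged as a right brace just below.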
+ if (Keywords.isVerilogEnd(*FormatTok)) + FormatTok->Tok.setKind(tok::r_brace); + } } void UnwrappedLineParser::distributeComments( @@ -4256,6 +4292,8 @@ void UnwrappedLineParser::readToken(int LevelDifference) { PreviousWasComment = FormatTok->is(tok::comment); while (!Line->InPPDirective && FormatTok->is(tok::hash) && + (!Style.isVerilog() || + Keywords.isVerilogPPDirective(*Tokens->peekNextToken())) && FirstNonCommentOnLine) { distributeComments(Comments, FormatTok); Comments.clear(); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 1a303536285169d1faa552e82cc28140851f4e08..ba89534b70d50bc44cb0bd29cc0550464b2c5bb4 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1951,7 +1951,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, << "-fdiagnostics-hotness-threshold="; } else { Opts.DiagnosticsHotnessThreshold = *ResultOrErr; - if ((!Opts.DiagnosticsHotnessThreshold.hasValue() || + if ((!Opts.DiagnosticsHotnessThreshold || Opts.DiagnosticsHotnessThreshold.getValue() > 0) && !UsingProfile) Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo) @@ -1968,7 +1968,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, << "-fdiagnostics-misexpect-tolerance="; } else { Opts.DiagnosticsMisExpectTolerance = *ResultOrErr; - if ((!Opts.DiagnosticsMisExpectTolerance.hasValue() || + if ((!Opts.DiagnosticsMisExpectTolerance || Opts.DiagnosticsMisExpectTolerance.getValue() > 0) && !UsingProfile) Diags.Report(diag::warn_drv_diagnostics_misexpect_requires_pgo) @@ -2578,10 +2578,10 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts, for (const auto &ModuleFile : Opts.ModuleFiles) GenerateArg(Args, OPT_fmodule_file, ModuleFile, SA); - if (Opts.AuxTargetCPU.hasValue()) + if (Opts.AuxTargetCPU) GenerateArg(Args, OPT_aux_target_cpu, *Opts.AuxTargetCPU, SA); - if (Opts.AuxTargetFeatures.hasValue()) + if (Opts.AuxTargetFeatures) for (const auto &Feature : *Opts.AuxTargetFeatures) GenerateArg(Args, OPT_aux_target_feature, Feature, SA); diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp index 8a89eddcd49dc209d9b5cdc6b6cf5ca5cbcd8585..52c50fe462360de7b2c31fdd1c77b62809031c27 100644 --- a/clang/lib/Frontend/InitPreprocessor.cpp +++ b/clang/lib/Frontend/InitPreprocessor.cpp @@ -852,11 +852,11 @@ static void InitializePredefinedMacros(const TargetInfo &TI, VersionTuple tuple = LangOpts.ObjCRuntime.getVersion(); unsigned minor = 0; - if (tuple.getMinor().hasValue()) + if (tuple.getMinor()) minor = tuple.getMinor().getValue(); unsigned subminor = 0; - if (tuple.getSubminor().hasValue()) + if (tuple.getSubminor()) subminor = tuple.getSubminor().getValue(); Builder.defineMacro("__OBJFW_RUNTIME_ABI__", diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt index 88a0a716e1269b4627b344a78e5f871128008dee..df359329d08b0e7ae639de1563266108b831df80 100644 --- a/clang/lib/Interpreter/CMakeLists.txt +++ b/clang/lib/Interpreter/CMakeLists.txt @@ -14,6 +14,7 @@ add_clang_library(clangInterpreter DEPENDS intrinsics_gen + ClangDriverOptions LINK_LIBS clangAST diff --git a/clang/lib/Interpreter/IncrementalExecutor.cpp b/clang/lib/Interpreter/IncrementalExecutor.cpp index 75c385aa409f30582254ca3f0b7e5aaabaef8563..c055827281b4f70d4321bcb0c1e11fce18d1bbc8 100644 --- a/clang/lib/Interpreter/IncrementalExecutor.cpp +++ b/clang/lib/Interpreter/IncrementalExecutor.cpp @@ 
-12,6 +12,7 @@ #include "IncrementalExecutor.h" +#include "clang/Interpreter/PartialTranslationUnit.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/Orc/CompileUtils.h" #include "llvm/ExecutionEngine/Orc/ExecutionUtils.h" @@ -52,8 +53,24 @@ IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, IncrementalExecutor::~IncrementalExecutor() {} -llvm::Error IncrementalExecutor::addModule(std::unique_ptr M) { - return Jit->addIRModule(llvm::orc::ThreadSafeModule(std::move(M), TSCtx)); +llvm::Error IncrementalExecutor::addModule(PartialTranslationUnit &PTU) { + llvm::orc::ResourceTrackerSP RT = + Jit->getMainJITDylib().createResourceTracker(); + ResourceTrackers[&PTU] = RT; + + return Jit->addIRModule(RT, {std::move(PTU.TheModule), TSCtx}); +} + +llvm::Error IncrementalExecutor::removeModule(PartialTranslationUnit &PTU) { + + llvm::orc::ResourceTrackerSP RT = std::move(ResourceTrackers[&PTU]); + if (!RT) + return llvm::Error::success(); + + ResourceTrackers.erase(&PTU); + if (llvm::Error Err = RT->remove()) + return Err; + return llvm::Error::success(); } llvm::Error IncrementalExecutor::runCtors() const { diff --git a/clang/lib/Interpreter/IncrementalExecutor.h b/clang/lib/Interpreter/IncrementalExecutor.h index 51b4d83d10b1ce6cd02c7cc1f12e246f0a75eabd..580724e1e24e204aa80c5a4707b0579d31df7e5c 100644 --- a/clang/lib/Interpreter/IncrementalExecutor.h +++ b/clang/lib/Interpreter/IncrementalExecutor.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H #define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/ExecutionEngine/Orc/ExecutionUtils.h" @@ -29,11 +30,17 @@ class ThreadSafeContext; } // namespace llvm namespace clang { + +struct PartialTranslationUnit; + class IncrementalExecutor { using CtorDtorIterator = llvm::orc::CtorDtorIterator; std::unique_ptr Jit; llvm::orc::ThreadSafeContext &TSCtx; + llvm::DenseMap + ResourceTrackers; + public: enum SymbolNameKind { IRName, LinkerName }; @@ -41,7 +48,8 @@ public: const llvm::Triple &Triple); ~IncrementalExecutor(); - llvm::Error addModule(std::unique_ptr M); + llvm::Error addModule(PartialTranslationUnit &PTU); + llvm::Error removeModule(PartialTranslationUnit &PTU); llvm::Error runCtors() const; llvm::Expected getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const; diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp index e5712303cbbb2863e7f9d41f3152d7bc33b8ab8f..db854c4161b4fc4ac0733141e98d2e58b750fe21 100644 --- a/clang/lib/Interpreter/IncrementalParser.cpp +++ b/clang/lib/Interpreter/IncrementalParser.cpp @@ -181,30 +181,12 @@ IncrementalParser::ParseOrWrapTopLevelDecl() { DiagnosticsEngine &Diags = getCI()->getDiagnostics(); if (Diags.hasErrorOccurred()) { - TranslationUnitDecl *MostRecentTU = C.getTranslationUnitDecl(); - TranslationUnitDecl *PreviousTU = MostRecentTU->getPreviousDecl(); - assert(PreviousTU && "Must have a TU from the ASTContext initialization!"); - TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl(); - assert(FirstTU); - FirstTU->RedeclLink.setLatest(PreviousTU); - C.TUDecl = PreviousTU; - S.TUScope->setEntity(PreviousTU); - - // Clean up the lookup table - if (StoredDeclsMap *Map = PreviousTU->getLookupPtr()) { - for (auto I = Map->begin(); I != Map->end(); ++I) { - StoredDeclsList &List = I->second; - DeclContextLookupResult R = List.getLookupResult(); - for 
(NamedDecl *D : R) - if (D->getTranslationUnitDecl() == MostRecentTU) - List.remove(D); - if (List.isNull()) - Map->erase(I); - } - } + PartialTranslationUnit MostRecentPTU = {C.getTranslationUnitDecl(), + nullptr}; + CleanUpPTU(MostRecentPTU); - // FIXME: Do not reset the pragma handlers. - Diags.Reset(); + Diags.Reset(/*soft=*/true); + Diags.getClient()->clear(); return llvm::make_error("Parsing failed.", std::error_code()); } @@ -296,6 +278,24 @@ IncrementalParser::Parse(llvm::StringRef input) { return PTU; } +void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) { + TranslationUnitDecl *MostRecentTU = PTU.TUPart; + TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl(); + if (StoredDeclsMap *Map = FirstTU->getPrimaryContext()->getLookupPtr()) { + for (auto I = Map->begin(); I != Map->end(); ++I) { + StoredDeclsList &List = I->second; + DeclContextLookupResult R = List.getLookupResult(); + for (NamedDecl *D : R) { + if (D->getTranslationUnitDecl() == MostRecentTU) { + List.remove(D); + } + } + if (List.isNull()) + Map->erase(I); + } + } +} + llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const { CodeGenerator *CG = getCodeGen(Act.get()); assert(CG); diff --git a/clang/lib/Interpreter/IncrementalParser.h b/clang/lib/Interpreter/IncrementalParser.h index d1f454f2123949b7cec4cc811ac6a65a75ba4e3a..8e45d6b5931bc2ba83f4a5e26fc50da93821c1f6 100644 --- a/clang/lib/Interpreter/IncrementalParser.h +++ b/clang/lib/Interpreter/IncrementalParser.h @@ -72,6 +72,10 @@ public: ///\returns the mangled name of a \c GD. llvm::StringRef GetMangledName(GlobalDecl GD) const; + void CleanUpPTU(PartialTranslationUnit &PTU); + + std::list &getPTUs() { return PTUs; } + private: llvm::Expected ParseOrWrapTopLevelDecl(); }; diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index 564b24efebdd04c07877f6e765bc8f7838047148..a10eb79b413b3962641b9f8556d0a7f33cbd910a 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ b/clang/lib/Interpreter/Interpreter.cpp @@ -222,7 +222,7 @@ llvm::Error Interpreter::Execute(PartialTranslationUnit &T) { return Err; } // FIXME: Add a callback to retain the llvm::Module once the JIT is done. - if (auto Err = IncrExecutor->addModule(std::move(T.TheModule))) + if (auto Err = IncrExecutor->addModule(T)) return Err; if (auto Err = IncrExecutor->runCtors()) @@ -260,3 +260,22 @@ Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const { return IncrExecutor->getSymbolAddress(Name, IncrementalExecutor::LinkerName); } + +llvm::Error Interpreter::Undo(unsigned N) { + + std::list &PTUs = IncrParser->getPTUs(); + if (N > PTUs.size()) + return llvm::make_error("Operation failed. 
" + "Too many undos", + std::error_code()); + for (unsigned I = 0; I < N; I++) { + if (IncrExecutor) { + if (llvm::Error Err = IncrExecutor->removeModule(PTUs.back())) + return Err; + } + + IncrParser->CleanUpPTU(PTUs.back()); + PTUs.pop_back(); + } + return llvm::Error::success(); +} diff --git a/clang/lib/Lex/DependencyDirectivesScanner.cpp b/clang/lib/Lex/DependencyDirectivesScanner.cpp index d8583841c607b002ba582bd38de9b0af24dd21d8..be7b7d6e17b2d2c1ea5593ecf977bf74e751aece 100644 --- a/clang/lib/Lex/DependencyDirectivesScanner.cpp +++ b/clang/lib/Lex/DependencyDirectivesScanner.cpp @@ -549,7 +549,7 @@ Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) { StringRef Scanner::lexIdentifier(const char *&First, const char *const End) { Optional Id = tryLexIdentifierOrSkipLine(First, End); - assert(Id.hasValue() && "expected identifier token"); + assert(Id && "expected identifier token"); return Id.getValue(); } diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp index d4601261b58bc6895caf3a0e2ddf6f9e4667e70d..122861c4d25208152db1952027bede2599944729 100644 --- a/clang/lib/Lex/Lexer.cpp +++ b/clang/lib/Lex/Lexer.cpp @@ -37,6 +37,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/MemoryBufferRef.h" #include "llvm/Support/NativeFormatting.h" +#include "llvm/Support/Unicode.h" #include "llvm/Support/UnicodeCharRanges.h" #include #include @@ -3119,27 +3120,28 @@ bool Lexer::isCodeCompletionPoint(const char *CurPtr) const { return false; } -uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc, - Token *Result) { +llvm::Optional Lexer::tryReadNumericUCN(const char *&StartPtr, + const char *SlashLoc, + Token *Result) { unsigned CharSize; char Kind = getCharAndSize(StartPtr, CharSize); - bool Delimited = false; - bool FoundEndDelimiter = false; - unsigned Count = 0; - bool Diagnose = Result && !isLexingRawMode(); + assert((Kind == 'u' || Kind == 'U') && "expected a UCN"); unsigned NumHexDigits; if (Kind == 'u') NumHexDigits = 4; else if (Kind == 'U') NumHexDigits = 8; - else - return 0; + + bool Delimited = false; + bool FoundEndDelimiter = false; + unsigned Count = 0; + bool Diagnose = Result && !isLexingRawMode(); if (!LangOpts.CPlusPlus && !LangOpts.C99) { if (Diagnose) Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89); - return 0; + return llvm::None; } const char *CurPtr = StartPtr + CharSize; @@ -3166,14 +3168,14 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc, break; if (Diagnose) Diag(BufferPtr, diag::warn_delimited_ucn_incomplete) - << StringRef(&C, 1); - return 0; + << StringRef(KindLoc, 1); + return llvm::None; } if (CodePoint & 0xF000'0000) { if (Diagnose) Diag(KindLoc, diag::err_escape_too_large) << 0; - return 0; + return llvm::None; } CodePoint <<= 4; @@ -3187,7 +3189,13 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc, Diag(StartPtr, FoundEndDelimiter ? 
diag::warn_delimited_ucn_empty : diag::warn_ucn_escape_no_digits) << StringRef(KindLoc, 1); - return 0; + return llvm::None; + } + + if (Delimited && Kind == 'U') { + if (Diagnose) + Diag(StartPtr, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1); + return llvm::None; } if (!Delimited && Count != NumHexDigits) { @@ -3200,11 +3208,11 @@ << FixItHint::CreateReplacement(URange, "u"); } } - return 0; + return llvm::None; } if (Delimited && PP) { - Diag(BufferPtr, diag::ext_delimited_escape_sequence); + Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*delimited*/ 0; } if (Result) { @@ -3217,6 +3225,110 @@ } else { StartPtr = CurPtr; } + return CodePoint; +} + +llvm::Optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr, + Token *Result) { + unsigned CharSize; + bool Diagnose = Result && !isLexingRawMode(); + + char C = getCharAndSize(StartPtr, CharSize); + assert(C == 'N' && "expected \\N{...}"); + + const char *CurPtr = StartPtr + CharSize; + const char *KindLoc = &CurPtr[-1]; + + C = getCharAndSize(CurPtr, CharSize); + if (C != '{') { + if (Diagnose) + Diag(StartPtr, diag::warn_ucn_escape_incomplete); + return llvm::None; + } + CurPtr += CharSize; + const char *StartName = CurPtr; + bool FoundEndDelimiter = false; + llvm::SmallVector<char> Buffer; + while (C) { + C = getCharAndSize(CurPtr, CharSize); + CurPtr += CharSize; + if (C == '}') { + FoundEndDelimiter = true; + break; + } + + if (!isAlphanumeric(C) && C != '_' && C != '-' && C != ' ') + break; + Buffer.push_back(C); + } + + if (!FoundEndDelimiter || Buffer.empty()) { + if (Diagnose) + Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty + : diag::warn_delimited_ucn_incomplete) + << StringRef(KindLoc, 1); + return llvm::None; + } + + StringRef Name(Buffer.data(), Buffer.size()); + llvm::Optional<char32_t> Res = + llvm::sys::unicode::nameToCodepointStrict(Name); + llvm::Optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch; + if (!Res) { + if (!isLexingRawMode()) { + Diag(StartPtr, diag::err_invalid_ucn_name) + << StringRef(Buffer.data(), Buffer.size()); + LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name); + if (LooseMatch) { + Diag(StartName, diag::note_invalid_ucn_name_loose_matching) + << FixItHint::CreateReplacement( + makeCharRange(*this, StartName, CurPtr - CharSize), + LooseMatch->Name); + } + } + // When finding a match using Unicode loose matching rules, + // recover after having emitted a diagnostic. + if (!LooseMatch) + return llvm::None; + // We do not offer misspelled character name suggestions here, + // as the set of what would be a valid suggestion depends on context, + // and we should not make invalid suggestions.
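+ // Editorial example: \N{bullet} fails the strict lookup above, but the + // loose match recovers it as U+2022 (BULLET), and the fix-it suggests the + // canonical spelling.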
+ } + + if (Diagnose && PP && !LooseMatch) + Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*named*/ 1; + + if (LooseMatch) + Res = LooseMatch->CodePoint; + + if (Result) { + Result->setFlag(Token::HasUCN); + if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 4)) + StartPtr = CurPtr; + else + while (StartPtr != CurPtr) + (void)getAndAdvanceChar(StartPtr, *Result); + } else { + StartPtr = CurPtr; + } + return *Res; +} + +uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc, + Token *Result) { + + unsigned CharSize; + llvm::Optional CodePointOpt; + char Kind = getCharAndSize(StartPtr, CharSize); + if (Kind == 'u' || Kind == 'U') + CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result); + else if (Kind == 'N') + CodePointOpt = tryReadNamedUCN(StartPtr, Result); + + if (!CodePointOpt) + return 0; + + uint32_t CodePoint = *CodePointOpt; // Don't apply C family restrictions to UCNs in assembly mode if (LangOpts.AsmPreprocessor) diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp index 9a30a41c851d74cddb2ad0ee91ccd8fb6623d4cc..e78113846667644b56e1c21197dd2537e520ed1b 100644 --- a/clang/lib/Lex/LiteralSupport.cpp +++ b/clang/lib/Lex/LiteralSupport.cpp @@ -27,6 +27,7 @@ #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/Error.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Unicode.h" #include #include #include @@ -233,7 +234,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin, HadError = true; if (Diags) Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf, - diag::err_delimited_escape_missing_brace); + diag::err_delimited_escape_missing_brace) + << "o"; break; } @@ -309,7 +311,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin, << tok::r_brace; else if (!HadError) { Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf, - diag::ext_delimited_escape_sequence); + diag::ext_delimited_escape_sequence) + << /*delimited*/ 0; } } @@ -335,7 +338,7 @@ void clang::expandUCNs(SmallVectorImpl &Buf, StringRef Input) { char Kind = *I; ++I; - assert(Kind == 'u' || Kind == 'U'); + assert(Kind == 'u' || Kind == 'U' || Kind == 'N'); uint32_t CodePoint = 0; if (Kind == 'u' && *I == '{') { @@ -349,6 +352,22 @@ void clang::expandUCNs(SmallVectorImpl &Buf, StringRef Input) { continue; } + if (Kind == 'N') { + assert(*I == '{'); + ++I; + auto Delim = std::find(I, Input.end(), '}'); + assert(Delim != Input.end()); + llvm::Optional Res = + llvm::sys::unicode::nameToCodepointLooseMatching( + StringRef(I, std::distance(I, Delim))); + assert(Res); + CodePoint = Res->CodePoint; + assert(CodePoint != 0xFFFFFFFF); + appendCodePoint(CodePoint, Buf); + I = Delim; + continue; + } + unsigned NumHexDigits; if (Kind == 'u') NumHexDigits = 4; @@ -370,23 +389,20 @@ void clang::expandUCNs(SmallVectorImpl &Buf, StringRef Input) { } } -/// ProcessUCNEscape - Read the Universal Character Name, check constraints and -/// return the UTF32. 
-static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf, - const char *ThisTokEnd, - uint32_t &UcnVal, unsigned short &UcnLen, - FullSourceLoc Loc, DiagnosticsEngine *Diags, - const LangOptions &Features, - bool in_char_string_literal = false) { +static bool ProcessNumericUCNEscape(const char *ThisTokBegin, + const char *&ThisTokBuf, + const char *ThisTokEnd, uint32_t &UcnVal, + unsigned short &UcnLen, bool &Delimited, + FullSourceLoc Loc, DiagnosticsEngine *Diags, + const LangOptions &Features, + bool in_char_string_literal = false) { const char *UcnBegin = ThisTokBuf; + bool HasError = false; + bool EndDelimiterFound = false; // Skip the '\u' chars. ThisTokBuf += 2; - - bool Delimited = false; - bool EndDelimiterFound = false; - bool HasError = false; - + Delimited = false; if (UcnBegin[1] == 'u' && in_char_string_literal && ThisTokBuf != ThisTokEnd && *ThisTokBuf == '{') { Delimited = true; @@ -394,7 +410,8 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf, } else if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) { if (Diags) Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf, - diag::err_hex_escape_no_digits) << StringRef(&ThisTokBuf[-1], 1); + diag::err_hex_escape_no_digits) + << StringRef(&ThisTokBuf[-1], 1); return false; } UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8); @@ -455,7 +472,136 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf, : diag::err_ucn_escape_incomplete); return false; } + return !HasError; +} + +static void DiagnoseInvalidUnicodeCharacterName( + DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc Loc, + const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, + llvm::StringRef Name) { + + Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd, + diag::err_invalid_ucn_name) + << Name; + + namespace u = llvm::sys::unicode; + + llvm::Optional<u::LooseMatchingResult> Res = + u::nameToCodepointLooseMatching(Name); + if (Res) { + Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd, + diag::note_invalid_ucn_name_loose_matching) + << FixItHint::CreateReplacement( + MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin, + TokRangeEnd), + Res->Name); + return; + } + + unsigned Distance = 0; + SmallVector<u::MatchForCodepointName> Matches = + u::nearestMatchesForCodepointName(Name, 5); + assert(!Matches.empty() && "No unicode characters found"); + + for (const auto &Match : Matches) { + if (Distance == 0) + Distance = Match.Distance; + if (std::max(Distance, Match.Distance) - + std::min(Distance, Match.Distance) > + 3) + break; + Distance = Match.Distance; + + std::string Str; + llvm::UTF32 V = Match.Value; + LLVM_ATTRIBUTE_UNUSED bool Converted = + llvm::convertUTF32ToUTF8String(llvm::ArrayRef(&V, 1), Str); + assert(Converted && "Found a match which is not a unicode character"); + + Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd, + diag::note_invalid_ucn_name_candidate) + << Match.Name << llvm::utohexstr(Match.Value) + << Str // FIXME: Fix the rendering of non-printable characters + << FixItHint::CreateReplacement( + MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin, + TokRangeEnd), + Match.Name); + } +} +static bool ProcessNamedUCNEscape(const char *ThisTokBegin, + const char *&ThisTokBuf, + const char *ThisTokEnd, uint32_t &UcnVal, + unsigned short &UcnLen, FullSourceLoc Loc, + DiagnosticsEngine *Diags, + const LangOptions &Features) { + const char *UcnBegin = ThisTokBuf; + assert(UcnBegin[0] == '\\' && UcnBegin[1] == 'N'); + 
ThisTokBuf += 2; + if (ThisTokBuf == ThisTokEnd || *ThisTokBuf != '{') { + if (Diags) { + Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf, + diag::err_delimited_escape_missing_brace) + << StringRef(&ThisTokBuf[-1], 1); + } + ThisTokBuf++; + return false; + } + ThisTokBuf++; + const char *ClosingBrace = + std::find_if_not(ThisTokBuf, ThisTokEnd, [](char C) { + return llvm::isAlnum(C) || llvm::isSpace(C) || C == '_' || C == '-'; + }); + bool Incomplete = ClosingBrace == ThisTokEnd || *ClosingBrace != '}'; + bool Empty = ClosingBrace == ThisTokBuf; + if (Incomplete || Empty) { + if (Diags) { + Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf, + Incomplete ? diag::err_ucn_escape_incomplete + : diag::err_delimited_escape_empty) + << StringRef(&UcnBegin[1], 1); + } + ThisTokBuf = ClosingBrace == ThisTokEnd ? ClosingBrace : ClosingBrace + 1; + return false; + } + StringRef Name(ThisTokBuf, ClosingBrace - ThisTokBuf); + ThisTokBuf = ClosingBrace + 1; + llvm::Optional Res = + llvm::sys::unicode::nameToCodepointStrict(Name); + if (!Res) { + if (Diags) + DiagnoseInvalidUnicodeCharacterName(Diags, Features, Loc, ThisTokBegin, + &UcnBegin[3], ClosingBrace, Name); + return false; + } + UcnVal = *Res; + UcnLen = UcnVal > 0xFFFF ? 8 : 4; + return true; +} + +/// ProcessUCNEscape - Read the Universal Character Name, check constraints and +/// return the UTF32. +static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf, + const char *ThisTokEnd, uint32_t &UcnVal, + unsigned short &UcnLen, FullSourceLoc Loc, + DiagnosticsEngine *Diags, + const LangOptions &Features, + bool in_char_string_literal = false) { + + bool HasError; + const char *UcnBegin = ThisTokBuf; + bool IsDelimitedEscapeSequence = false; + bool IsNamedEscapeSequence = false; + if (ThisTokBuf[1] == 'N') { + IsNamedEscapeSequence = true; + HasError = !ProcessNamedUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, + UcnVal, UcnLen, Loc, Diags, Features); + } else { + HasError = + !ProcessNumericUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal, + UcnLen, IsDelimitedEscapeSequence, Loc, Diags, + Features, in_char_string_literal); + } if (HasError) return false; @@ -493,9 +639,10 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf, Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf, diag::warn_ucn_not_valid_in_c89_literal); - if (Delimited && Diags) + if ((IsDelimitedEscapeSequence || IsNamedEscapeSequence) && Diags) Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf, - diag::ext_delimited_escape_sequence); + diag::ext_delimited_escape_sequence) + << (IsNamedEscapeSequence ? 1 : 0); return true; } @@ -1559,7 +1706,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end, continue; } // Is this a Universal Character Name escape? - if (begin[1] == 'u' || begin[1] == 'U') { + if (begin[1] == 'u' || begin[1] == 'U' || begin[1] == 'N') { unsigned short UcnLen = 0; if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen, FullSourceLoc(Loc, PP.getSourceManager()), @@ -1919,7 +2066,8 @@ void StringLiteralParser::init(ArrayRef StringToks){ continue; } // Is this a Universal Character Name escape? 
- if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') { + if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U' || + ThisTokBuf[1] == 'N') { EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, ResultPtr, hadError, FullSourceLoc(StringToks[i].getLocation(), SM), @@ -2112,7 +2260,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok, // Otherwise, this is an escape character. Advance over it. bool HadError = false; - if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U') { + if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U' || + SpellingPtr[1] == 'N') { const char *EscapePtr = SpellingPtr; unsigned Len = MeasureUCNEscape(SpellingStart, SpellingPtr, SpellingEnd, 1, Features, HadError); diff --git a/clang/lib/Lex/MacroInfo.cpp b/clang/lib/Lex/MacroInfo.cpp index 4a8127d29a45913dec060c31f50dc34fb3d88ec9..310b95f36771dd0586482f752f6a1b2cef540e68 100644 --- a/clang/lib/Lex/MacroInfo.cpp +++ b/clang/lib/Lex/MacroInfo.cpp @@ -209,12 +209,11 @@ MacroDirective::DefInfo MacroDirective::getDefinition() { } VisibilityMacroDirective *VisMD = cast(MD); - if (!isPublic.hasValue()) + if (!isPublic) isPublic = VisMD->isPublic(); } - return DefInfo(nullptr, UndefLoc, - !isPublic.hasValue() || isPublic.getValue()); + return DefInfo(nullptr, UndefLoc, !isPublic || isPublic.getValue()); } const MacroDirective::DefInfo diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp index 49e14732b3c2333eccb150fe76efe8282bf47b75..bf46e5422bc8d883aa11523f448ad07eb3d885e2 100644 --- a/clang/lib/Lex/PPMacroExpansion.cpp +++ b/clang/lib/Lex/PPMacroExpansion.cpp @@ -1325,7 +1325,7 @@ already_lexed: // The last ')' has been reached; return the value if one found or // a diagnostic and a dummy value. - if (Result.hasValue()) { + if (Result) { OS << Result.getValue(); // For strict conformance to __has_cpp_attribute rules, use 'L' // suffix for dated literals. diff --git a/clang/lib/Lex/PreprocessingRecord.cpp b/clang/lib/Lex/PreprocessingRecord.cpp index 432068b35f19cc0fbb0d4dff30325c52853c0586..673ef637e396a8792848d9fe977eda3bf20ada39 100644 --- a/clang/lib/Lex/PreprocessingRecord.cpp +++ b/clang/lib/Lex/PreprocessingRecord.cpp @@ -114,7 +114,7 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) { // deserializing it. Optional IsInFile = ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID); - if (IsInFile.hasValue()) + if (IsInFile) return IsInFile.getValue(); // The external source did not provide a definite answer, go and deserialize diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp index 05cd43c01b08846964798b2978f1e5caf36f493d..43a69a8e94e14d67495e2709408aa54d0d8ece91 100644 --- a/clang/lib/Parse/ParseOpenMP.cpp +++ b/clang/lib/Parse/ParseOpenMP.cpp @@ -178,6 +178,7 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) { {OMPD_target_teams_distribute_parallel_for, OMPD_simd, OMPD_target_teams_distribute_parallel_for_simd}, {OMPD_master, OMPD_taskloop, OMPD_master_taskloop}, + {OMPD_masked, OMPD_taskloop, OMPD_masked_taskloop}, {OMPD_master_taskloop, OMPD_simd, OMPD_master_taskloop_simd}, {OMPD_parallel, OMPD_master, OMPD_parallel_master}, {OMPD_parallel, OMPD_masked, OMPD_parallel_masked}, @@ -1872,7 +1873,7 @@ void Parser::ParseOMPDeclareTargetClauses( if (IsDeviceTypeClause) { Optional DevTypeData = parseOpenMPSimpleClause(*this, OMPC_device_type); - if (DevTypeData.hasValue()) { + if (DevTypeData) { if (DeviceTypeLoc.isValid()) { // We already saw another device_type clause, diagnose it. 
Diag(DevTypeData.getValue().Loc, @@ -2312,9 +2313,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc); if (HasClauses) ParseOMPDeclareTargetClauses(DTCI); - bool HasImplicitMappings = - DKind == OMPD_begin_declare_target || !HasClauses || - (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect.hasValue()); + bool HasImplicitMappings = DKind == OMPD_begin_declare_target || + !HasClauses || + (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect); // Skip the last annot_pragma_openmp_end. ConsumeAnyToken(); @@ -2385,6 +2386,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: + case OMPD_masked_taskloop: case OMPD_distribute: case OMPD_target_update: case OMPD_distribute_parallel_for: @@ -2782,6 +2784,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp index d69081cbecedc66bd98402d026c637b8356bdc10..6ca98876b8fc8008db0939f21f22673b3d188445 100644 --- a/clang/lib/Parse/ParsePragma.cpp +++ b/clang/lib/Parse/ParsePragma.cpp @@ -255,12 +255,6 @@ struct PragmaMSIntrinsicHandler : public PragmaHandler { Token &FirstToken) override; }; -struct PragmaMSOptimizeHandler : public PragmaHandler { - PragmaMSOptimizeHandler() : PragmaHandler("optimize") {} - void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer, - Token &FirstToken) override; -}; - // "\#pragma fenv_access (on)". struct PragmaMSFenvAccessHandler : public PragmaHandler { PragmaMSFenvAccessHandler() : PragmaHandler("fenv_access") {} @@ -449,12 +443,12 @@ void Parser::initializePragmaHandlers() { PP.AddPragmaHandler(MSFunction.get()); MSAllocText = std::make_unique("alloc_text"); PP.AddPragmaHandler(MSAllocText.get()); + MSOptimize = std::make_unique("optimize"); + PP.AddPragmaHandler(MSOptimize.get()); MSRuntimeChecks = std::make_unique(); PP.AddPragmaHandler(MSRuntimeChecks.get()); MSIntrinsic = std::make_unique(); PP.AddPragmaHandler(MSIntrinsic.get()); - MSOptimize = std::make_unique(); - PP.AddPragmaHandler(MSOptimize.get()); MSFenvAccess = std::make_unique(); PP.AddPragmaHandler(MSFenvAccess.get()); } @@ -923,7 +917,8 @@ void Parser::HandlePragmaMSPragma() { .Case("section", &Parser::HandlePragmaMSSection) .Case("init_seg", &Parser::HandlePragmaMSInitSeg) .Case("function", &Parser::HandlePragmaMSFunction) - .Case("alloc_text", &Parser::HandlePragmaMSAllocText); + .Case("alloc_text", &Parser::HandlePragmaMSAllocText) + .Case("optimize", &Parser::HandlePragmaMSOptimize); if (!(this->*Handler)(PragmaName, PragmaLocation)) { // Pragma handling failed, and has been diagnosed. 
Slurp up the tokens @@ -3645,57 +3640,64 @@ bool Parser::HandlePragmaMSFunction(StringRef PragmaName, } // #pragma optimize("gsty", on|off) -void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP, - PragmaIntroducer Introducer, - Token &Tok) { - SourceLocation StartLoc = Tok.getLocation(); - PP.Lex(Tok); - - if (Tok.isNot(tok::l_paren)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "optimize"; - return; - } - PP.Lex(Tok); +bool Parser::HandlePragmaMSOptimize(StringRef PragmaName, + SourceLocation PragmaLocation) { + Token FirstTok = Tok; + if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen, + PragmaName)) + return false; if (Tok.isNot(tok::string_literal)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_string) << "optimize"; - return; + PP.Diag(PragmaLocation, diag::warn_pragma_expected_string) << PragmaName; + return false; } - // We could syntax check the string but it's probably not worth the effort. - PP.Lex(Tok); - - if (Tok.isNot(tok::comma)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_comma) << "optimize"; - return; + ExprResult StringResult = ParseStringLiteralExpression(); + if (StringResult.isInvalid()) + return false; // Already diagnosed. + StringLiteral *OptimizationList = cast(StringResult.get()); + if (OptimizationList->getCharByteWidth() != 1) { + PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string) + << PragmaName; + return false; } - PP.Lex(Tok); - if (Tok.is(tok::eod) || Tok.is(tok::r_paren)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_missing_argument) - << "optimize" << /*Expected=*/true << "'on' or 'off'"; - return; + if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_comma, + PragmaName)) + return false; + + if (Tok.is(tok::eof) || Tok.is(tok::r_paren)) { + PP.Diag(PragmaLocation, diag::warn_pragma_missing_argument) + << PragmaName << /*Expected=*/true << "'on' or 'off'"; + return false; } IdentifierInfo *II = Tok.getIdentifierInfo(); if (!II || (!II->isStr("on") && !II->isStr("off"))) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument) - << PP.getSpelling(Tok) << "optimize" << /*Expected=*/true + PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument) + << PP.getSpelling(Tok) << PragmaName << /*Expected=*/true << "'on' or 'off'"; - return; + return false; } + bool IsOn = II->isStr("on"); PP.Lex(Tok); - if (Tok.isNot(tok::r_paren)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "optimize"; - return; - } - PP.Lex(Tok); + if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen, + PragmaName)) + return false; - if (Tok.isNot(tok::eod)) { - PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) - << "optimize"; - return; + // TODO: Add support for "sgty" + if (!OptimizationList->getString().empty()) { + PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument) + << OptimizationList->getString() << PragmaName << /*Expected=*/true + << "\"\""; + return false; } - PP.Diag(StartLoc, diag::warn_pragma_optimize); + + if (ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol, + PragmaName)) + return false; + + Actions.ActOnPragmaMSOptimize(FirstTok.getLocation(), IsOn); + return true; } void PragmaForceCUDAHostDeviceHandler::HandlePragma( diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp index 8e6f029726bceffe871ea6ca9d3fb2cd9b91cc54..c7e62e58955336cb12edc62e330b3af49447a9f1 100644 --- a/clang/lib/Sema/SemaAttr.cpp +++ b/clang/lib/Sema/SemaAttr.cpp @@ -1144,6 +1144,15 @@ void 
Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) { OptimizeOffPragmaLocation = PragmaLoc; } +void Sema::ActOnPragmaMSOptimize(SourceLocation Loc, bool IsOn) { + if (!CurContext->getRedeclContext()->isFileContext()) { + Diag(Loc, diag::err_pragma_expected_file_scope) << "optimize"; + return; + } + + MSPragmaOptimizeIsOn = IsOn; +} + void Sema::ActOnPragmaMSFunction( SourceLocation Loc, const llvm::SmallVectorImpl &NoBuiltins) { if (!CurContext->getRedeclContext()->isFileContext()) { @@ -1177,6 +1186,13 @@ void Sema::AddSectionMSAllocText(FunctionDecl *FD) { } } +void Sema::ModifyFnAttributesMSPragmaOptimize(FunctionDecl *FD) { + // Don't modify the function attributes if it's "on". "on" resets the + // optimizations to the ones listed on the command line + if (!MSPragmaOptimizeIsOn) + AddOptnoneAttributeIfNoConflicts(FD, FD->getBeginLoc()); +} + void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc) { // Don't add a conflicting attribute. No diagnostic is needed. diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp index a3b15fe7e90b6761d10656376dac5e46f87f1276..8f8144d658d816be8f62c76cb230bf6d17028030 100644 --- a/clang/lib/Sema/SemaCUDA.cpp +++ b/clang/lib/Sema/SemaCUDA.cpp @@ -444,7 +444,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, // If no target was inferred, mark this member as __host__ __device__; // it's the least restrictive option that can be invoked from any target. bool NeedsH = true, NeedsD = true; - if (InferredTarget.hasValue()) { + if (InferredTarget) { if (InferredTarget.getValue() == CFT_Device) NeedsH = false; else if (InferredTarget.getValue() == CFT_Host) diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 94d535bebf69a7174400a616e788f0ed771eaa60..117f48a93a5c487885bf68e9b747a9b7765afb2c 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -1873,7 +1873,7 @@ static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { return 2; return llvm::Optional{}; }(); - if (DiagSelect.hasValue()) { + if (DiagSelect) { S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) << DiagSelect.getValue() << TheCall->getSourceRange(); return ExprError(); @@ -15731,53 +15731,6 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { << TRange << Op->getSourceRange(); } -/// Check whether this array fits the idiom of a size-one tail padded -/// array member of a struct. -/// -/// We avoid emitting out-of-bounds access warnings for such arrays as they are -/// commonly used to emulate flexible arrays in C89 code. -static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, - const NamedDecl *ND) { - if (Size != 1 || !ND) return false; - - const FieldDecl *FD = dyn_cast(ND); - if (!FD) return false; - - // Don't consider sizes resulting from macro expansions or template argument - // substitution to form C89 tail-padded arrays. - - TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); - while (TInfo) { - TypeLoc TL = TInfo->getTypeLoc(); - // Look through typedefs. 
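The Parse and Sema pieces above promote `#pragma optimize` from a warn-and-ignore preprocessor handler to a real MS pragma handler: "off" makes subsequent function definitions pick up optnone, "on" restores the command-line settings, and, per the TODO, any non-empty optimization list is still rejected. A usage sketch of the accepted form:

```cpp
#pragma optimize("", off)
void stepped_through_in_a_debugger() {} // receives optnone

#pragma optimize("", on)
void hot_path() {} // optimized per the command line again
```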
- if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { - const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); - TInfo = TDL->getTypeSourceInfo(); - continue; - } - if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { - const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); - if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) - return false; - } - break; - } - - const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); - if (!RD) return false; - if (RD->isUnion()) return false; - if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { - if (!CRD->isStandardLayout()) return false; - } - - // See if this is the last field decl in the record. - const Decl *D = FD; - while ((D = D->getNextDeclInContext())) - if (isa<FieldDecl>(D)) - return false; - return true; -} - void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE, bool AllowOnePastEnd, bool IndexNegated) { @@ -15930,10 +15883,9 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) return; - // Also don't warn for arrays of size 1 which are members of some - // structure. These are often used to approximate flexible arrays in C89 - // code. - if (IsTailPaddedMemberArray(*this, size, ND)) + // Also don't warn for flexible array members. + if (BaseExpr->isFlexibleArrayMember(Context, + getLangOpts().StrictFlexArrays)) return; // Suppress the warning if the subscript expression (as identified by the @@ -16688,7 +16640,7 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S, Body = FS->getBody(); DiagID = diag::warn_empty_for_body; } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { - StmtLoc = WS->getCond()->getSourceRange().getEnd(); + StmtLoc = WS->getRParenLoc(); Body = WS->getBody(); DiagID = diag::warn_empty_while_body; } else diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp index c78b0df6ff4881a163077c852a8236a659a126d3..239e5dc4394c3815e067dda492fcbe63fa78691e 100644 --- a/clang/lib/Sema/SemaConcept.cpp +++ b/clang/lib/Sema/SemaConcept.cpp @@ -348,8 +348,9 @@ bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction) { return calculateConstraintSatisfaction( *this, ConstraintExpr, Satisfaction, - [](const Expr *AtomicExpr) -> ExprResult { - return ExprResult(const_cast<Expr *>(AtomicExpr)); + [this](const Expr *AtomicExpr) -> ExprResult { + // We only do this to imitate lvalue-to-rvalue conversion. + return PerformContextuallyConvertToBool(const_cast<Expr *>(AtomicExpr)); }); } diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index fb7c14611a46ca0fed8dbaa8e121b44360047fe1..1453d1b09d16ff7a7762735d91095f7e688e46e0 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -10211,6 +10211,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, AddRangeBasedOptnone(NewFD); AddImplicitMSFunctionNoBuiltinAttr(NewFD); AddSectionMSAllocText(NewFD); + ModifyFnAttributesMSPragmaOptimize(NewFD); } // If this is the first declaration of an extern C variable, update @@ -15457,7 +15458,7 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( // (3.1) If the allocation function takes an argument of type // std::align_val_t, the storage will have the alignment // specified by the value of this argument.
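The CheckArrayAccess change above retires the local size-one tail-padding heuristic in favor of `Expr::isFlexibleArrayMember`, gated by the `StrictFlexArrays` language option (presumably surfaced as a -fstrict-flex-arrays style flag). The idiom whose out-of-bounds warning is being suppressed is the C89-style emulated flexible array:

```cpp
struct packet {
  int len;
  char data[1]; // tail-padded array used as a flexible array member
};
// Callers over-allocate the struct and index past data[0] into the
// trailing storage; with stricter StrictFlexArrays settings such
// fixed-size members stop qualifying and the bounds warning fires again.
```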
- if (AlignmentParam.hasValue() && !FD->hasAttr()) { + if (AlignmentParam && !FD->hasAttr()) { FD->addAttr(AllocAlignAttr::CreateImplicit( Context, ParamIdx(AlignmentParam.getValue(), FD), FD->getLocation())); } @@ -19123,12 +19124,12 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD, // #pragma omp declare target to(*) device_type(*). // Therefore DevTy having no value does not imply host. The emission status // will be checked again at the end of compilation unit with Final = true. - if (DevTy.hasValue()) + if (DevTy) if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host) return FunctionEmissionStatus::OMPDiscarded; // If we have an explicit value for the device type, or we are in a target // declare context, we need to emit all extern and used symbols. - if (isInOpenMPDeclareTargetContext() || DevTy.hasValue()) + if (isInOpenMPDeclareTargetContext() || DevTy) if (IsEmittedForExternalSymbol()) return FunctionEmissionStatus::Emitted; // Device mode only emits what it must, if it wasn't tagged yet and needed, diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index 4f8578542bca9e55d41c1ae5cafa5088d4bf49e6..18eeef9576ddcf96c890b8f04bf675ea81f1f9bf 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -2680,8 +2680,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) { auto Major = Version.getMajor(); auto NewMajor = Major >= 9 ? Major - 7 : 0; if (NewMajor >= 2) { - if (Version.getMinor().hasValue()) { - if (Version.getSubminor().hasValue()) + if (Version.getMinor()) { + if (Version.getSubminor()) return VersionTuple(NewMajor, Version.getMinor().getValue(), Version.getSubminor().getValue()); else @@ -7865,6 +7865,11 @@ static bool isGlobalVar(const Decl *D) { return false; } +static bool isSanitizerAttributeAllowedOnGlobals(StringRef Sanitizer) { + return Sanitizer == "address" || Sanitizer == "hwaddress" || + Sanitizer == "memtag"; +} + static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) { if (!AL.checkAtLeastNumArgs(S, 1)) return; @@ -7882,7 +7887,7 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) { SanitizerMask() && SanitizerName != "coverage") S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName; - else if (isGlobalVar(D) && SanitizerName != "address") + else if (isGlobalVar(D) && !isSanitizerAttributeAllowedOnGlobals(SanitizerName)) S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type) << AL << ExpectedFunctionOrMethod; Sanitizers.push_back(SanitizerName); diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp index eee283d74ae01d0bd3a58da935ce8e86a7c59bfc..e63c44ef505ff127073363e66b3164adc7842fd2 100644 --- a/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/clang/lib/Sema/SemaExceptionSpec.cpp @@ -1453,6 +1453,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) { case Stmt::OMPForSimdDirectiveClass: case Stmt::OMPMasterDirectiveClass: case Stmt::OMPMasterTaskLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopDirectiveClass: case Stmt::OMPMasterTaskLoopSimdDirectiveClass: case Stmt::OMPOrderedDirectiveClass: case Stmt::OMPCanonicalLoopClass: diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index bb1cf4b941b7856722c1b86e09d68db946691275..1e7975ed513f1353dc241ce1cd54eefa5a565753 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -2274,10 +2274,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, // How many bytes do we 
want to allocate here? llvm::Optional AllocationSize; - if (!ArraySize.hasValue() && !AllocType->isDependentType()) { + if (!ArraySize && !AllocType->isDependentType()) { // For non-array operator new, we only want to allocate one element. AllocationSize = SingleEltSize; - } else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) { + } else if (KnownArraySize && !AllocType->isDependentType()) { // For array operator new, only deal with static array size case. bool Overflow; AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize) diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index e65f5a236a1fff744f57ea38a58bc7f776740174..2546b804c28bb2280e17d869c527d6cf76f41e1b 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -828,7 +828,7 @@ public: /// Returns optional parameter for the ordered region. std::pair getOrderedRegionParam() const { if (const SharingMapTy *Top = getTopOfStackOrNull()) - if (Top->OrderedRegion.hasValue()) + if (Top->OrderedRegion) return Top->OrderedRegion.getValue(); return std::make_pair(nullptr, nullptr); } @@ -843,7 +843,7 @@ public: std::pair getParentOrderedRegionParam() const { if (const SharingMapTy *Parent = getSecondOnStackOrNull()) - if (Parent->OrderedRegion.hasValue()) + if (Parent->OrderedRegion) return Parent->OrderedRegion.getValue(); return std::make_pair(nullptr, nullptr); } @@ -4133,6 +4133,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1) @@ -4865,6 +4866,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack, (SemaRef.getLangOpts().OpenMP >= 50 && (ParentRegion == OMPD_taskloop || ParentRegion == OMPD_master_taskloop || + ParentRegion == OMPD_masked_taskloop || ParentRegion == OMPD_parallel_master_taskloop)))) || (CancelRegion == OMPD_sections && (ParentRegion == OMPD_section || ParentRegion == OMPD_sections || @@ -6246,6 +6248,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_taskloop); break; + case OMPD_masked_taskloop: + Res = ActOnOpenMPMaskedTaskLoopDirective( + ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); + AllowedNameModifiers.push_back(OMPD_taskloop); + break; case OMPD_master_taskloop_simd: Res = ActOnOpenMPMasterTaskLoopSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); @@ -7754,7 +7761,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) { bool IsConstZero = Result && !Result->getBoolValue(); // != with increment is treated as <; != with decrement is treated as > - if (!TestIsLessOp.hasValue()) + if (!TestIsLessOp) TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract); if (UB && (IsConstZero || (TestIsLessOp.getValue() @@ -8867,6 +8874,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { DVar.CKind != OMPC_private))) || ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop || DKind == OMPD_master_taskloop || + DKind == OMPD_masked_taskloop || DKind == OMPD_parallel_master_taskloop || isOpenMPDistributeDirective(DKind)) && !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && @@ -13146,6 +13154,44 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective( 
DSAStack->isCancelRegion()); } +StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { + if (!AStmt) + return StmtError(); + + assert(isa(AStmt) && "Captured statement expected"); + OMPLoopBasedDirective::HelperExprs B; + // In presence of clause 'collapse' or 'ordered' with number of loops, it will + // define the nested loops number. + unsigned NestedLoopCount = + checkOpenMPLoop(OMPD_masked_taskloop, getCollapseNumberExpr(Clauses), + /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, + VarsWithImplicitDSA, B); + if (NestedLoopCount == 0) + return StmtError(); + + assert((CurContext->isDependentContext() || B.builtAll()) && + "omp for loop exprs were not built"); + + // OpenMP, [2.9.2 taskloop Construct, Restrictions] + // The grainsize clause and num_tasks clause are mutually exclusive and may + // not appear on the same taskloop directive. + if (checkMutuallyExclusiveClauses(*this, Clauses, + {OMPC_grainsize, OMPC_num_tasks})) + return StmtError(); + // OpenMP, [2.9.2 taskloop Construct, Restrictions] + // If a reduction clause is present on the taskloop directive, the nogroup + // clause must not be specified. + if (checkReductionClauseWithNogroup(*this, Clauses)) + return StmtError(); + + setFunctionHasBranchProtectedScope(); + return OMPMaskedTaskLoopDirective::Create(Context, StartLoc, EndLoc, + NestedLoopCount, Clauses, AStmt, B, + DSAStack->isCancelRegion()); +} + StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { @@ -14865,6 +14911,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_task: case OMPD_taskloop: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_target_data: case OMPD_simd: case OMPD_for_simd: @@ -14954,6 +15001,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_threadprivate: case OMPD_allocate: @@ -15026,6 +15074,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: @@ -15113,6 +15162,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: @@ -15198,6 +15248,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: @@ -15289,6 +15340,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: @@ -15385,6 +15437,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + 
case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: @@ -15448,6 +15501,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: + case OMPD_masked_taskloop: case OMPD_master_taskloop_simd: break; case OMPD_parallel_master_taskloop: @@ -22126,7 +22180,7 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, auto *VD = cast(ND); llvm::Optional ActiveAttr = OMPDeclareTargetDeclAttr::getActiveAttr(VD); - if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DTCI.DT && + if (ActiveAttr && ActiveAttr.getValue()->getDevType() != DTCI.DT && ActiveAttr.getValue()->getLevel() == Level) { Diag(Loc, diag::err_omp_device_type_mismatch) << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DTCI.DT) @@ -22134,18 +22188,18 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, ActiveAttr.getValue()->getDevType()); return; } - if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getMapType() != MT && + if (ActiveAttr && ActiveAttr.getValue()->getMapType() != MT && ActiveAttr.getValue()->getLevel() == Level) { Diag(Loc, diag::err_omp_declare_target_to_and_link) << ND; return; } - if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() == Level) + if (ActiveAttr && ActiveAttr.getValue()->getLevel() == Level) return; Expr *IndirectE = nullptr; bool IsIndirect = false; - if (DTCI.Indirect.hasValue()) { + if (DTCI.Indirect) { IndirectE = DTCI.Indirect.getValue(); if (!IndirectE) IsIndirect = true; @@ -22240,12 +22294,12 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, llvm::Optional ActiveAttr = OMPDeclareTargetDeclAttr::getActiveAttr(VD); unsigned Level = DeclareTargetNesting.size(); - if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() >= Level) + if (ActiveAttr && ActiveAttr.getValue()->getLevel() >= Level) return; DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back(); Expr *IndirectE = nullptr; bool IsIndirect = false; - if (DTCI.Indirect.hasValue()) { + if (DTCI.Indirect) { IndirectE = DTCI.Indirect.getValue(); if (!IndirectE) IsIndirect = true; diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index c627d631714d5f927812f351ff91aa355ad2dcf7..c226ed625479091306bd92a2bd2c6b3e46f94c2c 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -11266,6 +11266,13 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand, if (shouldSkipNotingLambdaConversionDecl(Fn)) return; + // There is no physical candidate declaration to point to for OpenCL builtins. + // Except for failed conversions, the notes are identical for each candidate, + // so do not generate such notes. + if (S.getLangOpts().OpenCL && Fn->isImplicit() && + Cand->FailureKind != ovl_fail_bad_conversion) + return; + // Note deleted candidates, but only if they're viable. 
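Taken together, the parser table entry, the Sema action, and the many case-label additions in this stretch wire up the OpenMP 5.1 combined `masked taskloop` directive alongside the existing `master taskloop`. The restrictions enforced in ActOnOpenMPMaskedTaskLoopDirective (grainsize vs. num_tasks, reduction vs. nogroup) mirror the plain taskloop rules. A usage sketch:

```cpp
void scale(int n, float a, float *x) {
#pragma omp parallel
#pragma omp masked taskloop grainsize(64)
  for (int i = 0; i < n; ++i)
    x[i] *= a; // tasks created by the masked thread, executed by the team
}
```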
if (Cand->Viable) { if (Fn->isDeleted()) { diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index ae3ea4db233e50592eff5943b9fec48788140419..82831a448869dff231bf71f007478bcfb1043557 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -888,8 +888,7 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, CommaVisitor(*this).Visit(CondExpr); if (!ConstevalOrNegatedConsteval && !elseStmt) - DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt, - diag::warn_empty_if_body); + DiagnoseEmptyStmtBody(RParenLoc, thenStmt, diag::warn_empty_if_body); if (ConstevalOrNegatedConsteval || StatementKind == IfStatementKind::Constexpr) { diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp index 8e59c449ae65605fa1adbb7ef704ac7977e85426..f09b3473c0749de54e0199bb40d3645f51ccb407 100644 --- a/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -10,12 +10,14 @@ //===----------------------------------------------------------------------===/ #include "TreeTransform.h" +#include "clang/AST/ASTConcept.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" +#include "clang/AST/ExprConcepts.h" #include "clang/AST/PrettyDeclStackTrace.h" #include "clang/AST/TypeVisitor.h" #include "clang/Basic/LangOptions.h" @@ -2022,6 +2024,7 @@ TemplateInstantiator::TransformNestedRequirement( Req->getConstraintExpr()->getSourceRange()); ExprResult TransConstraint; + ConstraintSatisfaction Satisfaction; TemplateDeductionInfo Info(Req->getConstraintExpr()->getBeginLoc()); { EnterExpressionEvaluationContext ContextRAII( @@ -2033,6 +2036,25 @@ TemplateInstantiator::TransformNestedRequirement( if (ConstrInst.isInvalid()) return nullptr; TransConstraint = TransformExpr(Req->getConstraintExpr()); + if (!TransConstraint.isInvalid()) { + bool CheckSucceeded = + SemaRef.CheckConstraintExpression(TransConstraint.get()); + (void)CheckSucceeded; + assert((CheckSucceeded || Trap.hasErrorOccurred()) && + "CheckConstraintExpression failed, but " + "did not produce a SFINAE error"); + } + // Use version of CheckConstraintSatisfaction that does no substitutions. 
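The TemplateInstantiator change above no longer just rebuilds a nested requirement after substitution: it re-runs CheckConstraintExpression on the substituted expression and, when the result is no longer dependent, records its constraint satisfaction up front. For reference, a nested requirement is the `requires`-inside-a-`requires`-expression form:

```cpp
#include <concepts>

template <class T>
concept SmallAddable = requires(T a, T b) {
  { a + b } -> std::convertible_to<T>;
  requires sizeof(T) <= 16; // nested requirement, now checked eagerly
};

static_assert(SmallAddable<int>);
```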
+ if (!TransConstraint.isInvalid() && + !TransConstraint.get()->isInstantiationDependent() && + !Trap.hasErrorOccurred()) { + bool CheckFailed = SemaRef.CheckConstraintSatisfaction( + TransConstraint.get(), Satisfaction); + (void)CheckFailed; + assert((!CheckFailed || Trap.hasErrorOccurred()) && + "CheckConstraintSatisfaction failed, " + "but did not produce a SFINAE error"); + } if (TransConstraint.isInvalid() || Trap.hasErrorOccurred()) return RebuildNestedRequirement(createSubstDiag(SemaRef, Info, [&] (llvm::raw_ostream& OS) { @@ -2040,7 +2062,11 @@ TemplateInstantiator::TransformNestedRequirement( SemaRef.getPrintingPolicy()); })); } - return RebuildNestedRequirement(TransConstraint.get()); + if (TransConstraint.get()->isInstantiationDependent()) + return new (SemaRef.Context) + concepts::NestedRequirement(TransConstraint.get()); + return new (SemaRef.Context) concepts::NestedRequirement( + SemaRef.Context, TransConstraint.get(), Satisfaction); } diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index ef093525510440865850a581711250d34f00a7f4..d890c1472c2a3d0cb7875b399afc4efd12dab75c 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -9047,6 +9047,17 @@ StmtResult TreeTransform::TransformOMPMasterTaskLoopDirective( return Res; } +template +StmtResult TreeTransform::TransformOMPMaskedTaskLoopDirective( + OMPMaskedTaskLoopDirective *D) { + DeclarationNameInfo DirName; + getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop, DirName, + nullptr, D->getBeginLoc()); + StmtResult Res = getDerived().TransformOMPExecutableDirective(D); + getDerived().getSema().EndOpenMPDSABlock(Res.get()); + return Res; +} + template StmtResult TreeTransform::TransformOMPMasterTaskLoopSimdDirective( OMPMasterTaskLoopSimdDirective *D) { diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp index d7980c2c69482f7c1825e60ae9a15585e1270874..e4466054f50e182008bd45f4861fe54bed27120e 100644 --- a/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/clang/lib/Serialization/ASTReaderStmt.cpp @@ -2508,6 +2508,12 @@ void ASTStmtReader::VisitOMPMasterTaskLoopDirective( D->setHasCancel(Record.readBool()); } +void ASTStmtReader::VisitOMPMaskedTaskLoopDirective( + OMPMaskedTaskLoopDirective *D) { + VisitOMPLoopDirective(D); + D->setHasCancel(Record.readBool()); +} + void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective( OMPMasterTaskLoopSimdDirective *D) { VisitOMPLoopDirective(D); @@ -3439,6 +3445,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { CollapsedNum, Empty); break; } + + case STMT_OMP_MASKED_TASKLOOP_DIRECTIVE: { + unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields]; + unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1]; + S = OMPMaskedTaskLoopDirective::CreateEmpty(Context, NumClauses, + CollapsedNum, Empty); + break; + } case STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE: { unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields]; diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp index f734998e6e495dc78ef231e2739015e1dd95bd82..aecbb4650a5c1198a267c225a887d2570bfa69ee 100644 --- a/clang/lib/Serialization/ASTWriterStmt.cpp +++ b/clang/lib/Serialization/ASTWriterStmt.cpp @@ -2458,6 +2458,13 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopDirective( Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE; } +void ASTStmtWriter::VisitOMPMaskedTaskLoopDirective( + OMPMaskedTaskLoopDirective *D) { + VisitOMPLoopDirective(D); + 
Record.writeBool(D->hasCancel()); + Code = serialization::STMT_OMP_MASKED_TASKLOOP_DIRECTIVE; +} + void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective( OMPMasterTaskLoopSimdDirective *D) { VisitOMPLoopDirective(D); diff --git a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp index 970bfd2d241cce1289362d83092f333fe442fea3..330ca90b7659e9e78db8523404d2083f1d135716 100644 --- a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp @@ -766,7 +766,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg, continue; // Generate only one error node to use for all bug reports. - if (!errorNode.hasValue()) + if (!errorNode) errorNode = C.generateNonFatalErrorNode(); if (!errorNode.getValue()) diff --git a/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp index 0e273771498cc809b7492248663b22ad87a80081..dbfdff4d2a3b1ca1083d1d4704f3d1c42f13e942 100644 --- a/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp @@ -272,12 +272,12 @@ ProgramStateRef GTestChecker::assumeValuesEqual(SVal Val1, SVal Val2, CheckerContext &C) { auto DVal1 = Val1.getAs(); auto DVal2 = Val2.getAs(); - if (!DVal1.hasValue() || !DVal2.hasValue()) + if (!DVal1 || !DVal2) return State; auto ValuesEqual = C.getSValBuilder().evalEQ(State, *DVal1, *DVal2).getAs(); - if (!ValuesEqual.hasValue()) + if (!ValuesEqual) return State; State = C.getConstraintManager().assume(State, *ValuesEqual, true); diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index 552d042483393434ad4e522b98a2c95194e35d06..92d7cef78b13dc2f1ee94445ac376517859211f0 100644 --- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -1238,7 +1238,7 @@ void MallocChecker::checkKernelMalloc(const CallEvent &Call, ProgramStateRef State = C.getState(); llvm::Optional MaybeState = performKernelMalloc(Call, C, State); - if (MaybeState.hasValue()) + if (MaybeState) State = MaybeState.getValue(); else State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State, @@ -3571,13 +3571,13 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State, const RefState *RefS = State->get(I.getKey()); AllocationFamily Family = RefS->getAllocationFamily(); Optional CheckKind = getCheckIfTracked(Family); - if (!CheckKind.hasValue()) - CheckKind = getCheckIfTracked(Family, true); + if (!CheckKind) + CheckKind = getCheckIfTracked(Family, true); I.getKey()->dumpToStream(Out); Out << " : "; I.getData().dump(Out); - if (CheckKind.hasValue()) + if (CheckKind) Out << " (" << CheckNames[*CheckKind].getName() << ")"; Out << NL; } diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp index 5142cf76653ac74cfd89e7348a2fa54c19458ae8..ef673ae41a3dc3a9a0ea0c454d04893618f19a3a 100644 --- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp @@ -1904,44 +1904,40 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( ArgumentCondition(1, WithinRange, Range(0, SizeMax)))); // int mkdir(const char *pathname, mode_t mode); - // FIXME: returns 0 on success, ReturnsValidFileDescriptor is 
incorrect addToFunctionSummaryMap( "mkdir", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}), Summary(NoEvalCall) - .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked) + .Case(ReturnsZero, ErrnoMustNotBeChecked) .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant) .ArgConstraint(NotNull(ArgNo(0)))); // int mkdirat(int dirfd, const char *pathname, mode_t mode); - // FIXME: returns 0 on success, ReturnsValidFileDescriptor is incorrect addToFunctionSummaryMap( "mkdirat", Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy}, RetType{IntTy}), Summary(NoEvalCall) - .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked) + .Case(ReturnsZero, ErrnoMustNotBeChecked) .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant) .ArgConstraint(NotNull(ArgNo(1)))); Optional Dev_tTy = lookupTy("dev_t"); // int mknod(const char *pathname, mode_t mode, dev_t dev); - // FIXME: returns 0 on success, ReturnsValidFileDescriptor is incorrect addToFunctionSummaryMap( "mknod", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}), Summary(NoEvalCall) - .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked) + .Case(ReturnsZero, ErrnoMustNotBeChecked) .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant) .ArgConstraint(NotNull(ArgNo(0)))); // int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev); - // FIXME: returns 0 on success, ReturnsValidFileDescriptor is incorrect addToFunctionSummaryMap( "mknodat", Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}), Summary(NoEvalCall) - .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked) + .Case(ReturnsZero, ErrnoMustNotBeChecked) .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant) .ArgConstraint(NotNull(ArgNo(1)))); diff --git a/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp index de94cb7c978da2084c357dd64a74499b1586e504..79d19a3b99f262cb47575a308fcb85cc382f1355 100644 --- a/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp +++ b/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp @@ -77,7 +77,7 @@ AnalyzerOptions::getExplorationStrategy() const { .Case("bfs_block_dfs_contents", ExplorationStrategyKind::BFSBlockDFSContents) .Default(None); - assert(K.hasValue() && "User mode is invalid."); + assert(K && "User mode is invalid."); return K.getValue(); } @@ -88,7 +88,7 @@ CTUPhase1InliningKind AnalyzerOptions::getCTUPhase1Inlining() const { .Case("small", CTUPhase1InliningKind::Small) .Case("all", CTUPhase1InliningKind::All) .Default(None); - assert(K.hasValue() && "CTU inlining mode is invalid."); + assert(K && "CTU inlining mode is invalid."); return K.getValue(); } @@ -100,7 +100,7 @@ IPAKind AnalyzerOptions::getIPAMode() const { .Case("dynamic", IPAK_DynamicDispatch) .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate) .Default(None); - assert(K.hasValue() && "IPA Mode is invalid."); + assert(K && "IPA Mode is invalid."); return K.getValue(); } diff --git a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp index 339a675ed1bafcd52258528284fefab0954f826d..5b72c91ccd74808714d9c2615470a3d38262881b 100644 --- a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp +++ b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -2949,7 +2949,7 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest( PathDiagnosticLocation Loc(Cond, SM, LCtx); auto event = std::make_shared(Loc, Message); - if (shouldPrune.hasValue()) + if (shouldPrune) event->setPrunable(shouldPrune.getValue()); return event; } @@ -3279,7 +3279,7 
@@ void FalsePositiveRefutationBRVisitor::finalizeVisitor( // And check for satisfiability Optional<bool> IsSAT = RefutationSolver->check(); - if (!IsSAT.hasValue()) + if (!IsSAT) return; if (!IsSAT.getValue()) diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index 6ba19d52488c732ea9bc2ab8c2722dd77873e91a..988029455fa142c26833473c1d1a66dec61a5291 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1262,6 +1262,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::OMPTaskLoopDirectiveClass: case Stmt::OMPTaskLoopSimdDirectiveClass: case Stmt::OMPMasterTaskLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopDirectiveClass: case Stmt::OMPMasterTaskLoopSimdDirectiveClass: case Stmt::OMPParallelMasterTaskLoopDirectiveClass: case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: @@ -1363,10 +1364,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, break; } + case Stmt::ArrayInitLoopExprClass: + Bldr.takeNodes(Pred); + VisitArrayInitLoopExpr(cast<ArrayInitLoopExpr>(S), Pred, Dst); + Bldr.addNodes(Dst); + break; // Cases not handled yet; but will handle some day. case Stmt::DesignatedInitExprClass: case Stmt::DesignatedInitUpdateExprClass: - case Stmt::ArrayInitLoopExprClass: case Stmt::ArrayInitIndexExprClass: case Stmt::ExtVectorElementExprClass: case Stmt::ImaginaryLiteralClass: @@ -2594,18 +2599,38 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D, if (const auto *BD = dyn_cast<BindingDecl>(D)) { const auto *DD = cast<DecompositionDecl>(BD->getDecomposedDecl()); + SVal Base = state->getLValue(DD, LCtx); + if (DD->getType()->isReferenceType()) { + Base = state->getSVal(Base.getAsRegion()); + } + + SVal V = UnknownVal(); + + // Handle binding to data members if (const auto *ME = dyn_cast<MemberExpr>(BD->getBinding())) { const auto *Field = cast<FieldDecl>(ME->getMemberDecl()); + V = state->getLValue(Field, Base); + } + // Handle binding to arrays + else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BD->getBinding())) { + SVal Idx = state->getSVal(ASE->getIdx(), LCtx); - SVal Base = state->getLValue(DD, LCtx); - if (DD->getType()->isReferenceType()) { - Base = state->getSVal(Base.getAsRegion()); - } - - SVal V = state->getLValue(Field, Base); + // Note: the index of an element in a structured binding is automatically + // created and it is a unique identifier of the specific element. Thus it + // cannot be a value that varies at runtime. + assert(Idx.isConstant() && "BindingDecl array index is not a constant!"); - Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V)); + V = state->getLValue(BD->getType(), Idx, Base); } + // Handle binding to tuple-like structures + else if (BD->getHoldingVar()) { + // FIXME: handle tuples + return; + } else + llvm_unreachable("An unknown case of structured binding encountered!"); + + Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr, + ProgramPoint::PostLValueKind); return; } @@ -2613,6 +2638,99 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D, llvm_unreachable("Support for this Decl not implemented."); } +/// VisitArrayInitLoopExpr - Transfer function for array init loop.
+void ExprEngine::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex, + ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + ExplodedNodeSet CheckerPreStmt; + getCheckerManager().runCheckersForPreStmt(CheckerPreStmt, Pred, Ex, *this); + + ExplodedNodeSet EvalSet; + StmtNodeBuilder Bldr(CheckerPreStmt, EvalSet, *currBldrCtx); + + const Expr *Arr = Ex->getCommonExpr()->getSourceExpr(); + + for (auto *Node : CheckerPreStmt) { + const LocationContext *LCtx = Node->getLocationContext(); + ProgramStateRef state = Node->getState(); + + SVal Base = UnknownVal(); + + // Because the sub-expressions of this expression are not visited by any + // other transfer functions, they are handled by matching their AST. + + // Case of implicit copy or move ctor of object with array member + // + // Note: ExprEngine::VisitMemberExpr is not able to bind the array to the + // environment. + // + // struct S { + // int arr[2]; + // }; + // + // + // S a; + // S b = a; + // + // The AST in case of a *copy constructor* looks like this: + // ArrayInitLoopExpr + // |-OpaqueValueExpr + // | `-MemberExpr <-- match this + // | `-DeclRefExpr + // ` ... + // + // + // S c; + // S d = std::move(c); + // + // In case of a *move constructor* the resulting AST looks like: + // ArrayInitLoopExpr + // |-OpaqueValueExpr + // | `-MemberExpr <-- match this first + // | `-CXXStaticCastExpr <-- match this after + // | `-DeclRefExpr + // ` ... + if (const auto *ME = dyn_cast<MemberExpr>(Arr)) { + Expr *MEBase = ME->getBase(); + + // Move ctor + if (auto CXXSCE = dyn_cast<CXXStaticCastExpr>(MEBase)) { + MEBase = CXXSCE->getSubExpr(); + } + + auto ObjDeclExpr = cast<DeclRefExpr>(MEBase); + SVal Obj = state->getLValue(cast<VarDecl>(ObjDeclExpr->getDecl()), LCtx); + + Base = state->getLValue(cast<FieldDecl>(ME->getMemberDecl()), Obj); + } + + // Case of lambda capture and decomposition declaration + // + // int arr[2]; + // + // [arr]{ int a = arr[0]; }(); + // auto [a, b] = arr; + // + // In both of these cases the AST looks like the following: + // ArrayInitLoopExpr + // |-OpaqueValueExpr + // | `-DeclRefExpr <-- match this + // ` ... + if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arr)) + Base = state->getLValue(cast<VarDecl>(DRE->getDecl()), LCtx); + + // Create a lazy compound value for the original array + if (const MemRegion *R = Base.getAsRegion()) + Base = state->getSVal(R); + else + Base = UnknownVal(); + + Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, Base)); + } + + getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this); +} + /// VisitArraySubscriptExpr - Transfer function for array accesses void ExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr *A, ExplodedNode *Pred, diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp index 326a3b1fb665f08b3149d812dde51ed6c21d0197..5f8a84591b2a0e555553e0e99f27a24fc3ed72ee 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp @@ -1015,7 +1015,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D, // Check if this function has been marked as non-inlinable.
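The new VisitArrayInitLoopExpr above recognizes the three source forms that produce an ArrayInitLoopExpr and binds the source array as a single lazy compound value rather than modeling the per-element loop. The constructs in question, for reference:

```cpp
struct S { int arr[2]; };

void forms() {
  S a{{1, 2}};
  S b = a;                    // implicit copy ctor copies the array member
  S c = static_cast<S &&>(a); // implicit move ctor, same AST plus a cast

  int xs[2] = {3, 4};
  auto lam = [xs] { return xs[0]; }; // by-copy lambda capture of an array
  auto [p, q] = xs;                  // structured binding to an array
}
```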
Optional MayInline = Engine.FunctionSummaries->mayInline(D); - if (MayInline.hasValue()) { + if (MayInline) { if (!MayInline.getValue()) return false; diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp index f0cda835e07c297b3c8ffbbf785f3c7c461cf837..389223e0b77664f00c890a4cb3fbc12f0570abbb 100644 --- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp +++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp @@ -789,6 +789,9 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR, if (isa(AT)) return true; + if (getContext().getLangOpts().StrictFlexArrays) + return false; + if (const auto *CAT = dyn_cast(AT)) { const llvm::APInt &Size = CAT->getSize(); if (Size.isZero()) diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp index ebfe1a168936b46f0062eac0e53bd5311055ba6e..0d7ca0ed0f1fa1b8c04e9803292f4d67a3c70202 100644 --- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp +++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp @@ -114,7 +114,7 @@ bool RVVType::verifyType() const { return false; if (isScalar()) return true; - if (!Scale.hasValue()) + if (!Scale) return false; if (isFloat() && ElementBitwidth == 8) return false; @@ -799,7 +799,7 @@ RVVType::computeTypes(BasicType BT, int Log2LMUL, unsigned NF, RVVTypes Types; for (const PrototypeDescriptor &Proto : Prototype) { auto T = computeType(BT, Log2LMUL, Proto); - if (!T.hasValue()) + if (!T) return llvm::None; // Record legal type index Types.push_back(T.getValue()); diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp index 30e1923bf1cb797991d9077c44574cd096f79e31..aca2afceea446df0d2034b2a9a818d9e58dfb64b 100644 --- a/clang/lib/Tooling/Core/Replacement.cpp +++ b/clang/lib/Tooling/Core/Replacement.cpp @@ -179,9 +179,9 @@ static std::string getReplacementErrString(replacement_error Err) { std::string ReplacementError::message() const { std::string Message = getReplacementErrString(Err); - if (NewReplacement.hasValue()) + if (NewReplacement) Message += "\nNew replacement: " + NewReplacement->toString(); - if (ExistingReplacement.hasValue()) + if (ExistingReplacement) Message += "\nExisting replacement: " + ExistingReplacement->toString(); return Message; } diff --git a/clang/test/Analysis/array-init-loop.cpp b/clang/test/Analysis/array-init-loop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..40a60dde582c671dd25350cbd2ba3b64dcf9368f --- /dev/null +++ b/clang/test/Analysis/array-init-loop.cpp @@ -0,0 +1,127 @@ +// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -std=c++17 -verify %s + +void clang_analyzer_eval(bool); + +void array_init() { + int arr[] = {1, 2, 3, 4, 5}; + + auto [a, b, c, d, e] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(c == 3); // expected-warning{{TRUE}} + clang_analyzer_eval(d == 4); // expected-warning{{TRUE}} + clang_analyzer_eval(e == 5); // expected-warning{{TRUE}} +} + +void array_uninit() { + int arr[5]; + + auto [a, b, c, d, e] = arr; + + int x = e; // expected-warning{{Assigned value is garbage or undefined}} +} + +void lambda_init() { + int arr[] = {1, 2, 3, 4, 5}; + + auto l = [arr] { return arr[0]; }(); + clang_analyzer_eval(l == 1); // expected-warning{{TRUE}} + + l = [arr] { return arr[1]; }(); + clang_analyzer_eval(l == 2); // expected-warning{{TRUE}} + + l = [arr] { return arr[2]; }(); + clang_analyzer_eval(l == 3); // 
expected-warning{{TRUE}} + + l = [arr] { return arr[3]; }(); + clang_analyzer_eval(l == 4); // expected-warning{{TRUE}} + + l = [arr] { return arr[4]; }(); + clang_analyzer_eval(l == 5); // expected-warning{{TRUE}} +} + +void lambda_uninit() { + int arr[5]; + + // FIXME: These should be Undefined, but we fail to read Undefined from a lazyCompoundVal + int l = [arr] { return arr[0]; }(); + clang_analyzer_eval(l); // expected-warning{{UNKNOWN}} + + l = [arr] { return arr[1]; }(); + clang_analyzer_eval(l); // expected-warning{{UNKNOWN}} + + l = [arr] { return arr[2]; }(); + clang_analyzer_eval(l); // expected-warning{{UNKNOWN}} + + l = [arr] { return arr[3]; }(); + clang_analyzer_eval(l); // expected-warning{{UNKNOWN}} + + l = [arr] { return arr[4]; }(); + clang_analyzer_eval(l); // expected-warning{{UNKNOWN}} +} + +struct S { + int arr[5]; +}; + +void copy_ctor_init() { + S orig; + orig.arr[0] = 1; + orig.arr[1] = 2; + orig.arr[2] = 3; + orig.arr[3] = 4; + orig.arr[4] = 5; + + S copy = orig; + clang_analyzer_eval(copy.arr[0] == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(copy.arr[1] == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(copy.arr[2] == 3); // expected-warning{{TRUE}} + clang_analyzer_eval(copy.arr[3] == 4); // expected-warning{{TRUE}} + clang_analyzer_eval(copy.arr[4] == 5); // expected-warning{{TRUE}} +} + +void copy_ctor_uninit() { + S orig; + + S copy = orig; + + // FIXME: These should be Undefined, but we fail to read Undefined from a lazyCompoundVal. + // If the struct is not considered a small struct, instead of a copy, we store a lazy compound value. + // As the struct has an array data member, it is not considered small. + clang_analyzer_eval(copy.arr[0]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(copy.arr[1]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(copy.arr[2]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(copy.arr[3]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(copy.arr[4]); // expected-warning{{UNKNOWN}} +} + +void move_ctor_init() { + S orig; + orig.arr[0] = 1; + orig.arr[1] = 2; + orig.arr[2] = 3; + orig.arr[3] = 4; + orig.arr[4] = 5; + + S moved = (S &&) orig; + + clang_analyzer_eval(moved.arr[0] == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(moved.arr[1] == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(moved.arr[2] == 3); // expected-warning{{TRUE}} + clang_analyzer_eval(moved.arr[3] == 4); // expected-warning{{TRUE}} + clang_analyzer_eval(moved.arr[4] == 5); // expected-warning{{TRUE}} +} + +void move_ctor_uninit() { + S orig; + + S moved = (S &&) orig; + + // FIXME: These should be Undefined, but we fail to read Undefined from a lazyCompoundVal. 
+ clang_analyzer_eval(moved.arr[0]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(moved.arr[1]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(moved.arr[2]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(moved.arr[3]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(moved.arr[4]); // expected-warning{{UNKNOWN}} +} diff --git a/clang/test/Analysis/cfg-openmp.cpp b/clang/test/Analysis/cfg-openmp.cpp index 7a263463c09197caa67aab9ea429f7dc2f27b346..3d9da0bf4f508b9a3ba6b238e18cea6d315d5c5e 100644 --- a/clang/test/Analysis/cfg-openmp.cpp +++ b/clang/test/Analysis/cfg-openmp.cpp @@ -688,6 +688,30 @@ void tl(int argc) { argc = x; } +// CHECK-LABEL: void maskedtaskloop(int argc) +void maskedtaskloop(int argc) { + int x, cond, fp, rd, lin, step, map; +// CHECK-DAG: [B3] +// CHECK-DAG: [[#MTLB:]]: x +// CHECK-DAG: [[#MTLB+1]]: [B3.[[#MTLB]]] (ImplicitCastExpr, LValueToRValue, int) +// CHECK-DAG: [[#MTLB+2]]: argc +// CHECK-DAG: [[#MTLB+3]]: [B3.[[#MTLB+2]]] = [B3.[[#MTLB+1]]] +// CHECK-DAG: [B1] +// CHECK-DAG: [[#MTL:]]: cond +// CHECK-DAG: [[#MTL+1]]: [B1.[[#MTL]]] (ImplicitCastExpr, LValueToRValue, int) +// CHECK-DAG: [[#MTL+2]]: [B1.[[#MTL+1]]] (ImplicitCastExpr, IntegralToBoolean, _Bool) +// CHECK-DAG: [[#MTL+3]]: fp +// CHECK-DAG: [[#MTL+4]]: rd +// CHECK-DAG: [[#MTL+5]]: [B3.[[#MTLB+2]]] +// CHECK-DAG: [[#MTL+6]]: [B3.[[#MTLB]]] +// CHECK-DAG: [[#MTL+7]]: #pragma omp masked taskloop if(cond) firstprivate(fp) reduction(+: rd) +// CHECK-DAG: for (int i = 0; +// CHECK-DAG: [B3.[[#MTLB+3]]]; +#pragma omp masked taskloop if(cond) firstprivate(fp) reduction(+:rd) + for (int i = 0; i < 10; ++i) + argc = x; +} + // CHECK-LABEL: void tls(int argc) void tls(int argc) { int x, cond, fp, rd, lin, step, map; diff --git a/clang/test/Analysis/uninit-structured-binding-array.cpp b/clang/test/Analysis/uninit-structured-binding-array.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b727cb49daaa4aad78f74092e71bfb9aa9dbbac6 --- /dev/null +++ b/clang/test/Analysis/uninit-structured-binding-array.cpp @@ -0,0 +1,294 @@ +// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -std=c++17 -verify %s + +void clang_analyzer_eval(bool); + +void array_value_a(void) { + int arr[2]; + auto [a, b] = arr; + arr[0] = 0; + + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_value_b(void) { + int arr[] = {1, 2}; + auto [a, b] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + + int x = a; // no-warning +} + +void array_value_c(void) { + int arr[3]; + + arr[1] = 1; + + auto [a, b, c] = arr; + + clang_analyzer_eval(b == arr[1]); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_value_d(void) { + int arr[3]; + + arr[1] = 1; + + auto [a, b, c] = arr; + + clang_analyzer_eval(b == arr[1]); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = c; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_value_e(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto [i, j] = init; + + clang_analyzer_eval(i == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(j == 0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // no-warning +} + +void array_value_f(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto [i, j] = uninit; + + clang_analyzer_eval(i == 
0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_lref_a(void) { + int arr[2]; + auto &[a, b] = arr; + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_lref_b(void) { + int arr[] = {1, 2}; + auto &[a, b] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + + int x = a; // no-warning +} + +void array_lref_c(void) { + int arr[2]; + auto &[a, b] = arr; + + arr[0] = 1; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + + int x = a; // no-warning + int y = b; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_lref_d(void) { + int arr[3]; + + arr[1] = 1; + + auto &[a, b, c] = arr; + + clang_analyzer_eval(b == 1); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_lref_e(void) { + int arr[3]; + + arr[1] = 1; + + auto &[a, b, c] = arr; + + clang_analyzer_eval(b == 1); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = c; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_lref_f(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto &[i, j] = init; + + clang_analyzer_eval(i == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(j == 0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // no-warning +} + +void array_lref_g(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto &[i, j] = uninit; + + clang_analyzer_eval(i == 0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_rref_a(void) { + int arr[2]; + auto &&[a, b] = arr; + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_rref_b(void) { + int arr[] = {1, 2}; + auto &&[a, b] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + + int x = a; // no-warning +} + +void array_rref_c(void) { + int arr[2]; + auto &&[a, b] = arr; + + arr[0] = 1; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + + int x = a; // no-warning + int y = b; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_rref_d(void) { + int arr[3]; + + arr[1] = 1; + + auto &&[a, b, c] = arr; + + clang_analyzer_eval(b == 1); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = a; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_rref_e(void) { + int arr[3]; + + arr[1] = 1; + + auto &&[a, b, c] = arr; + + clang_analyzer_eval(b == 1); // expected-warning{{TRUE}} + + int y = b; // no-warning + int x = c; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_rref_f(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto &&[i, j] = init; + + clang_analyzer_eval(i == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(j == 0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // no-warning +} + +void array_rref_g(void) { + int uninit[2]; + int init[2] = {0}; + + uninit[0] = init[0]; + + auto &&[i, j] = uninit; + + clang_analyzer_eval(i == 0); // expected-warning{{TRUE}} + + int a = i; // no-warning + int b = j; // expected-warning{{Assigned value is garbage or 
undefined}} +} + +void array_change_a(void) { + int arr[] = {1, 2}; + + auto [a, b] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + a = 3; + clang_analyzer_eval(a == 3); // expected-warning{{TRUE}} + + clang_analyzer_eval(arr[0] == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(arr[1] == 2); // expected-warning{{TRUE}} + + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} +} + +void array_change_b(void) { + int arr[] = {1, 2}; + + auto &[a, b] = arr; + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + + a = 3; + clang_analyzer_eval(a == 3); // expected-warning{{TRUE}} + + clang_analyzer_eval(arr[0] == 3); // expected-warning{{TRUE}} + clang_analyzer_eval(arr[1] == 2); // expected-warning{{TRUE}} +} + +void array_small_a(void) { + int arr[5]; + + auto [a, b, c, d, e] = arr; + + int x = e; // expected-warning{{Assigned value is garbage or undefined}} +} + +void array_big_a(void) { + int arr[6]; + + auto [a, b, c, d, e, f] = arr; + + // FIXME: These will be Undefined when we handle reading Undefined values from lazyCompoundVal. + clang_analyzer_eval(a == 1); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(b == 2); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(c == 3); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(d == 4); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(e == 5); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(f == 6); // expected-warning{{UNKNOWN}} +} diff --git a/clang/test/CXX/stmt.stmt/stmt.select/p3.cpp b/clang/test/CXX/stmt.stmt/stmt.select/p3.cpp index 4804cc559d007f9a4ec71d6b82738d862d9b10fd..2d45d82d43754aee9885957f6c1a5f3c9199cb17 100644 --- a/clang/test/CXX/stmt.stmt/stmt.select/p3.cpp +++ b/clang/test/CXX/stmt.stmt/stmt.select/p3.cpp @@ -63,8 +63,6 @@ void whileInitStatement() { // expected-note@-1 {{to match this '('}} // expected-error@-2 {{expected ';' after expression}} // expected-error@-3 {{expected expression}} - // expected-warning@-4 {{while loop has empty body}} - // expected-note@-5 {{put the semicolon on a separate line to silence this warning}} } // TODO: This is needed because clang can't seem to diagnose invalid syntax after the diff --git a/clang/test/CodeGen/Inputs/sanitizer-extra-source.cpp b/clang/test/CodeGen/Inputs/sanitizer-extra-source.cpp new file mode 100644 index 0000000000000000000000000000000000000000..21371d53425544a054745159afbdb88c2a4c63d7 --- /dev/null +++ b/clang/test/CodeGen/Inputs/sanitizer-extra-source.cpp @@ -0,0 +1 @@ +int extra_global; diff --git a/clang/test/CodeGen/Inputs/sanitizer-ignorelist-global.txt b/clang/test/CodeGen/Inputs/sanitizer-ignorelist-global.txt new file mode 100644 index 0000000000000000000000000000000000000000..40a1d07fb89589da7d2692af09b645595dcd76b1 --- /dev/null +++ b/clang/test/CodeGen/Inputs/sanitizer-ignorelist-global.txt @@ -0,0 +1 @@ +global:*ignorelisted_global* diff --git a/clang/test/CodeGen/Inputs/sanitizer-ignorelist-src.txt b/clang/test/CodeGen/Inputs/sanitizer-ignorelist-src.txt new file mode 100644 index 0000000000000000000000000000000000000000..67e50c852606f161e04aa17d0e4510f1091b9fde --- /dev/null +++ b/clang/test/CodeGen/Inputs/sanitizer-ignorelist-src.txt @@ -0,0 +1 @@ +src:*-globals.cpp diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c index 09728162391c42e79302c709f36851b5a7059f68..4de8cc1626d58edcbb1e1afd8ec791075211f25f 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) { @@ -16,7 +16,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) { @@ -25,7 +25,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) { @@ -34,7 +34,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) { @@ -43,7 +43,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) { @@ -52,7 +52,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { @@ -61,7 +61,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { @@ -70,7 +70,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { @@ -79,7 +79,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { @@ -88,7 +88,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { @@ -97,7 +97,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { @@ -106,7 +106,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { @@ -115,7 +115,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { @@ -124,7 +124,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { @@ -133,7 +133,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { @@ -142,7 +142,7 @@ vint16m1_t 
test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { @@ -151,7 +151,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { @@ -160,7 +160,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { @@ -169,7 +169,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { @@ -178,7 +178,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { @@ -187,7 +187,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { @@ -196,7 +196,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { @@ -205,7 +205,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { @@ -214,7 +214,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { @@ -223,7 +223,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { @@ -232,7 +232,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { @@ -241,7 +241,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { @@ -250,7 +250,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { @@ -259,7 +259,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { @@ -268,7 +268,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { @@ -277,7 +277,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { @@ -286,7 +286,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { @@ -295,7 +295,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { @@ -304,7 +304,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { @@ -313,7 +313,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { @@ -322,7 +322,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { @@ -331,7 +331,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { @@ -340,7 +340,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { // CHECK-RV64-LABEL: 
@test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { @@ -349,7 +349,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { @@ -358,7 +358,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { @@ -367,7 +367,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { @@ -376,7 +376,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { @@ -385,7 +385,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { @@ -394,7 +394,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { @@ -403,7 +403,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { @@ -412,7 +412,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { @@ -421,7 +421,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { @@ -430,7 +430,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { @@ -439,7 +439,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { @@ -448,7 +448,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { @@ -457,7 +457,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { @@ -466,7 +466,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { @@ -475,7 +475,7 
@@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { @@ -484,7 +484,7 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { @@ -493,7 +493,7 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { @@ -502,7 +502,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { @@ -511,7 +511,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { @@ -520,7 +520,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { @@ -529,7 +529,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { @@ -538,7 +538,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c index b6fa36f8416b62a57911bebc91fe92b09d778e6d..270422467ee2142d52a38920d0632d8191ec90fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { @@ -16,7 +16,7 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { @@ -25,7 +25,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { @@ -34,7 +34,7 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { @@ -43,7 +43,7 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { @@ -52,7 +52,7 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { @@ -61,7 +61,7 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { @@ -70,7 +70,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { @@ -79,7 +79,7 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { @@ -88,7 +88,7 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { @@ -97,7 +97,7 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { @@ -106,7 +106,7 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { @@ -115,7 +115,7 @@ vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { @@ -124,7 +124,7 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { @@ 
-133,7 +133,7 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { @@ -142,7 +142,7 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { @@ -151,7 +151,7 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { @@ -160,7 +160,7 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { @@ -169,7 +169,7 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { @@ -178,7 +178,7 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { @@ -187,7 +187,7 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { @@ -196,7 +196,7 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { @@ -205,7 +205,7 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { @@ -214,7 +214,7 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { @@ -223,7 +223,7 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { @@ -232,7 +232,7 @@ vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { @@ -241,7 +241,7 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { @@ -250,7 +250,7 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { @@ -259,7 +259,7 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { @@ -268,7 +268,7 @@ 
vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { @@ -277,7 +277,7 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { @@ -286,7 +286,7 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { @@ -295,7 +295,7 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { @@ -304,7 +304,7 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { @@ -313,7 +313,7 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { @@ -322,7 +322,7 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { @@ -331,7 +331,7 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { @@ -340,7 +340,7 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { @@ -349,7 +349,7 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { @@ -358,7 +358,7 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { @@ -367,7 +367,7 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { @@ -376,7 +376,7 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { @@ -385,7 +385,7 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { @@ -394,7 +394,7 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { @@ -403,7 +403,7 @@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { @@ -412,7 +412,7 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { @@ -421,7 +421,7 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { @@ -430,7 +430,7 @@ vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { @@ -439,7 +439,7 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { @@ -448,7 +448,7 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { @@ -457,7 +457,7 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { @@ -466,7 +466,7 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
@@ -475,7 +475,7 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
@@ -484,7 +484,7 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
@@ -493,7 +493,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
@@ -502,7 +502,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
@@ -511,7 +511,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
@@ -520,7 +520,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
@@ -538,7 +538,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
@@ -547,7 +547,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
@@ -556,7 +556,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
@@ -565,7 +565,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
@@ -574,7 +574,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
@@ -583,7 +583,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
@@ -592,7 +592,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
@@ -601,7 +601,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
@@ -610,7 +610,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
@@ -619,7 +619,7 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
@@ -628,7 +628,7 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
@@ -637,7 +637,7 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
@@ -646,7 +646,7 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
@@ -655,7 +655,7 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
@@ -664,7 +664,7 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
@@ -673,7 +673,7 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
@@ -682,7 +682,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
@@ -691,7 +691,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
@@ -700,7 +700,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
@@ -709,7 +709,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
@@ -718,7 +718,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
@@ -727,7 +727,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
@@ -736,7 +736,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
@@ -745,7 +745,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
@@ -754,7 +754,7 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
@@ -763,7 +763,7 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
@@ -772,7 +772,7 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
@@ -781,7 +781,7 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
@@ -790,7 +790,7 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
@@ -799,7 +799,7 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
@@ -808,7 +808,7 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
@@ -817,7 +817,7 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
@@ -826,7 +826,7 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
@@ -835,7 +835,7 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
@@ -844,7 +844,7 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
@@ -853,7 +853,7 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
@@ -862,7 +862,7 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
@@ -871,7 +871,7 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
@@ -880,7 +880,7 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
@@ -889,7 +889,7 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
@@ -898,7 +898,7 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
@@ -907,7 +907,7 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
@@ -916,7 +916,7 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
@@ -925,7 +925,7 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
@@ -934,7 +934,7 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
@@ -943,7 +943,7 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
@@ -952,7 +952,7 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
@@ -961,7 +961,7 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
@@ -970,7 +970,7 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
@@ -979,7 +979,7 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
@@ -988,7 +988,7 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
@@ -997,7 +997,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
@@ -1006,7 +1006,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
@@ -1015,7 +1015,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
@@ -1024,7 +1024,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
@@ -1033,7 +1033,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
@@ -1042,7 +1042,7 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
@@ -1051,7 +1051,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
@@ -1060,7 +1060,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
@@ -1069,7 +1069,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
@@ -1078,7 +1078,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
@@ -1087,7 +1087,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
@@ -1096,7 +1096,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
@@ -1105,7 +1105,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
@@ -1114,7 +1114,7 @@ vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
@@ -1123,7 +1123,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
@@ -1132,7 +1132,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
@@ -1141,7 +1141,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
@@ -1150,7 +1150,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
@@ -1159,7 +1159,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
@@ -1168,7 +1168,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
@@ -1177,7 +1177,7 @@ vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
@@ -1186,7 +1186,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
@@ -1195,7 +1195,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
@@ -1204,7 +1204,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
@@ -1213,7 +1213,7 @@ vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
@@ -1222,7 +1222,7 @@ vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
@@ -1231,7 +1231,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
@@ -1240,7 +1240,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
@@ -1249,7 +1249,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
@@ -1258,7 +1258,7 @@ vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
@@ -1267,7 +1267,7 @@ vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
@@ -1276,7 +1276,7 @@ vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
@@ -1285,7 +1285,7 @@ vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
@@ -1294,7 +1294,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
@@ -1303,7 +1303,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
@@ -1312,7 +1312,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
@@ -1321,7 +1321,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
@@ -1330,7 +1330,7 @@ vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
@@ -1339,7 +1339,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
@@ -1348,7 +1348,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
@@ -1357,7 +1357,7 @@ vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
@@ -1366,7 +1366,7 @@ vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
@@ -1375,7 +1375,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
@@ -1384,7 +1384,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
@@ -1393,7 +1393,7 @@ vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
@@ -1402,7 +1402,7 @@ vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
@@ -1411,7 +1411,7 @@ vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
@@ -1420,7 +1420,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
@@ -1429,7 +1429,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
@@ -1438,7 +1438,7 @@ vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
@@ -1447,7 +1447,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
@@ -1456,7 +1456,7 @@ vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
@@ -1465,7 +1465,7 @@ vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
@@ -1474,7 +1474,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
@@ -1483,7 +1483,7 @@ vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
@@ -1492,7 +1492,7 @@ vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
@@ -1501,7 +1501,7 @@ vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
@@ -1510,7 +1510,7 @@ vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
@@ -1519,7 +1519,7 @@ vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
@@ -1528,7 +1528,7 @@ vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
@@ -1537,7 +1537,7 @@ vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
@@ -1546,7 +1546,7 @@ vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
@@ -1555,7 +1555,7 @@ vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
@@ -1564,7 +1564,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
@@ -1573,7 +1573,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
@@ -1582,7 +1582,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
@@ -1591,7 +1591,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
@@ -1600,7 +1600,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
@@ -1609,7 +1609,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
@@ -1618,7 +1618,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
@@ -1627,7 +1627,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
@@ -1636,7 +1636,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
@@ -1645,7 +1645,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
@@ -1654,7 +1654,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
@@ -1663,7 +1663,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
@@ -1672,7 +1672,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
@@ -1681,7 +1681,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
@@ -1690,7 +1690,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
@@ -1699,7 +1699,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
@@ -1708,7 +1708,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
@@ -1717,7 +1717,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
@@ -1726,7 +1726,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
@@ -1735,7 +1735,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
@@ -1744,7 +1744,7 @@ vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
@@ -1753,7 +1753,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
@@ -1762,7 +1762,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
@@ -1771,7 +1771,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
@@ -1780,7 +1780,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
@@ -1789,7 +1789,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
@@ -1798,7 +1798,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
@@ -1807,7 +1807,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
@@ -1816,7 +1816,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
@@ -1825,7 +1825,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
@@ -1834,7 +1834,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
@@ -1843,7 +1843,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
@@ -1852,7 +1852,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
@@ -1861,7 +1861,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
@@ -1870,7 +1870,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
@@ -1879,7 +1879,7 @@ vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { @@ -1888,7 +1888,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { @@ -1897,7 +1897,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { @@ -1906,7 +1906,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ -1915,7 +1915,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1924,7 +1924,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1933,7 +1933,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1942,7 +1942,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1951,7 +1951,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1960,7 +1960,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1969,7 +1969,7 @@ vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1978,7 +1978,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1987,7 +1987,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1996,7 +1996,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2005,7 +2005,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2014,7 +2014,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2023,7 +2023,7 @@ vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2032,7 +2032,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2041,7 +2041,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2050,7 +2050,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2059,7 +2059,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2068,7 +2068,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2077,7 +2077,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( 
[[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2086,7 +2086,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2095,7 +2095,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2104,7 +2104,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ -2113,7 +2113,7 @@ vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2122,7 +2122,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2131,7 +2131,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2140,7 +2140,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2149,7 +2149,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t 
op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2158,7 +2158,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c index efc4ee494f7ec656481ef7c4be8db38a69c0ebfa..15950a0679304ac4b1562f18fe77a1f7b81da4fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { @@ -16,7 +16,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { @@ -25,7 +25,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { @@ -34,7 +34,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { @@ -43,7 +43,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { @@ -52,7 +52,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { @@ -61,7 +61,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) { @@ -70,7 +70,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) { @@ -79,7 +79,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) { @@ -88,7 +88,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) { @@ -97,7 +97,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) { @@ -106,7 +106,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) { @@ -115,7 +115,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) { @@ -124,7 +124,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) { @@ -133,7 +133,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) { @@ -142,7 +142,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) { @@ -151,7 +151,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) { @@ -160,7 +160,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) { @@ -169,7 +169,7 @@ vint16m8_t 
test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) { @@ -178,7 +178,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) { @@ -187,7 +187,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) { @@ -196,7 +196,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) { @@ -205,7 +205,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) { @@ -214,7 +214,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) { @@ -223,7 +223,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) { @@ -232,7 +232,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) { @@ -241,7 +241,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) { @@ -250,7 +250,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) { @@ -259,7 +259,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) { @@ -268,7 +268,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val // CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) { @@ -277,7 +277,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) { @@ -286,7 +286,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) { @@ -295,7 +295,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) { @@ -304,7 +304,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) { @@ -313,7 +313,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) { @@ -322,7 +322,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t // CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) { @@ -331,7 +331,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) { @@ -340,7 +340,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1 // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) { @@ -349,7 +349,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1 // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) { @@ -358,7 +358,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2 // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) { @@ -367,7 +367,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1 // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) { @@ -376,7 +376,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2 // CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) { @@ -385,7 +385,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4 // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) { @@ -394,7 +394,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) { @@ -403,7 +403,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) { @@ -412,7 +412,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) { @@ -421,7 +421,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) { @@ -430,7 +430,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val // CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) { @@ -439,7 +439,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) { @@ -448,7 +448,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) { @@ -457,7 +457,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) { @@ -466,7 +466,7 @@ vuint64m4_t 
test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) { @@ -475,7 +475,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) { @@ -484,7 +484,7 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t // CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) { @@ -493,7 +493,7 @@ vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) { @@ -502,7 +502,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1 // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) { @@ -511,7 +511,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1 // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) { @@ -520,7 +520,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2 // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
@@ -529,7 +529,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1
 // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
@@ -538,7 +538,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2
 // CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0dfbdc5cb06be27b12c510426937fd71e6225a20
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp
@@ -0,0 +1,29 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// Use a constexpr function to make sure we correctly evaluate it as a constant
+// when emitting IR for the vget/vset builtins.
+constexpr int foo() { return 1; }
+
+// CHECK-RV64-LABEL: @_Z21test_vget_v_i8m2_i8m1u14__rvv_int8m2_t
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
+  return vget_v_i8m2_i8m1(src, foo());
+}
+
+// CHECK-RV64-LABEL: @_Z21test_vset_v_i8m1_i8m2u14__rvv_int8m2_tu14__rvv_int8m1_t
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
+  return vset_v_i8m1_i8m2(dest, foo(), val);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
index 2bd89e8f8d5f1cdcc302e51181c031a825d5500b..0762c8aa416ec2ec9dead33f1ae0f16388256af1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
@@ -17,7 +17,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) {
@@ -26,7 +26,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) {
@@ -35,7 +35,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) {
@@ -44,7 +44,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) {
@@ -53,7 +53,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) {
@@ -62,7 +62,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) {
@@ -71,7 +71,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) {
@@ -80,7 +80,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) {
@@ -89,7 +89,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) {
@@ -98,7 +98,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) {
@@ -107,7 +107,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) {
@@ -116,7 +116,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) {
@@ -125,7 +125,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) {
 // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) { @@ -134,7 +134,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) { @@ -143,7 +143,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) { @@ -152,7 +152,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) { @@ -161,7 +161,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) { @@ -170,7 +170,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) { @@ -179,7 +179,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) { @@ -188,7 +188,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) { @@ -197,7 +197,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) { @@ -206,7 +206,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) { @@ -215,7 +215,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) { @@ -224,7 +224,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) { @@ -233,7 +233,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) { @@ -242,7 +242,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) { @@ -251,7 +251,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) { @@ -260,7 +260,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) { @@ -269,7 +269,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) { @@ -278,7 +278,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) { @@ -287,7 +287,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) { @@ -296,7 +296,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) { @@ -305,7 +305,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) { @@ -314,7 +314,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) { @@ -323,7 +323,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) { @@ -332,7 +332,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) { @@ -341,7 +341,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) { @@ -350,7 +350,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) { @@ -359,7 +359,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) { @@ -368,7 +368,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) { @@ -377,7 +377,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) { @@ -386,7 +386,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) { @@ -395,7 +395,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) { @@ -404,7 +404,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) { @@ -413,7 +413,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) { @@ -422,7 +422,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) { @@ -431,7 +431,7 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) { @@ -440,7 +440,7 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) { @@ -449,7 +449,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) { @@ -458,7 +458,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) { @@ -467,7 +467,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) { @@ -476,7 +476,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) { @@ -485,7 +485,7 @@ vfloat32m2_t 
test_vget_v_f32m8_f32m2(vfloat32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) { @@ -494,7 +494,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) { @@ -503,7 +503,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) { @@ -512,7 +512,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) { @@ -521,7 +521,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) { @@ -530,7 +530,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) { @@ -539,7 +539,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) { @@ -548,7 +548,7 @@ vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) { @@ -557,7 +557,7 @@ vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) { @@ -566,7 +566,7 @@ vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) { @@ -575,7 +575,7 @@ vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) { @@ -584,7 +584,7 @@ vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) { @@ -593,7 +593,7 @@ vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) { // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vget_v_f16m8_f16m4 (vfloat16m8_t src) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c index 16dfbc2177e64851cc65d3b56777af5b0af5fe5f..44458f1744aba78ce1950e46debf71a4bb404737 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { @@ -17,7 +17,7 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { @@ -26,7 
+26,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { @@ -35,7 +35,7 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { @@ -44,7 +44,7 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { @@ -53,7 +53,7 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { @@ -62,7 +62,7 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { @@ -71,7 +71,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { @@ -80,7 +80,7 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { @@ -89,7 +89,7 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { @@ -98,7 +98,7 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { @@ -107,7 +107,7 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { @@ -116,7 +116,7 @@ vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { @@ -125,7 +125,7 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { @@ -134,7 +134,7 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { @@ -143,7 +143,7 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { @@ -152,7 +152,7 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { @@ -161,7 +161,7 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { @@ -170,7 +170,7 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { @@ -179,7 +179,7 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { @@ -188,7 +188,7 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { @@ -197,7 +197,7 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { @@ -206,7 +206,7 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { @@ -215,7 +215,7 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { @@ -224,7 +224,7 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { @@ -233,7 +233,7 @@ vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { @@ -242,7 +242,7 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { @@ -251,7 +251,7 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { @@ -260,7 +260,7 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { @@ -269,7 +269,7 @@ vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { @@ -278,7 +278,7 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { @@ -287,7 +287,7 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { @@ -296,7 +296,7 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { // CHECK-RV64-LABEL: 
@test_vlmul_ext_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { @@ -305,7 +305,7 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { @@ -314,7 +314,7 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { @@ -323,7 +323,7 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { @@ -332,7 +332,7 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { @@ -341,7 +341,7 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { @@ -350,7 +350,7 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { @@ -359,7 +359,7 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { @@ -368,7 +368,7 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { @@ -377,7 +377,7 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { @@ -386,7 +386,7 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { @@ -395,7 +395,7 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { @@ -404,7 +404,7 @@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { @@ -413,7 +413,7 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { @@ -422,7 +422,7 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { @@ -431,7 +431,7 @@ vint64m2_t 
test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { @@ -440,7 +440,7 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { @@ -449,7 +449,7 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { @@ -458,7 +458,7 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { @@ -467,7 +467,7 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { @@ -476,7 +476,7 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { @@ -485,7 +485,7 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { @@ -494,7 +494,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], 
i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
@@ -503,7 +503,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
@@ -512,7 +512,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
@@ -521,7 +521,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
@@ -530,7 +530,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
@@ -539,7 +539,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
@@ -548,7 +548,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
@@ -557,7 +557,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
@@ -566,7 +566,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
@@ -575,7 +575,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
@@ -584,7 +584,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
@@ -593,7 +593,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
@@ -602,7 +602,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
@@ -611,7 +611,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
@@ -620,7 +620,7 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
@@ -629,7 +629,7 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
@@ -638,7 +638,7 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
@@ -647,7 +647,7 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
@@ -656,7 +656,7 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
@@ -665,7 +665,7 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
@@ -674,7 +674,7 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
@@ -683,7 +683,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
@@ -692,7 +692,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
@@ -701,7 +701,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
@@ -710,7 +710,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
@@ -719,7 +719,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
@@ -728,7 +728,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
@@ -737,7 +737,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
@@ -746,7 +746,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
@@ -755,7 +755,7 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
@@ -764,7 +764,7 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
@@ -773,7 +773,7 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
@@ -782,7 +782,7 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
@@ -791,7 +791,7 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
@@ -800,7 +800,7 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
@@ -809,7 +809,7 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
@@ -818,7 +818,7 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
@@ -827,7 +827,7 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
@@ -836,7 +836,7 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
@@ -845,7 +845,7 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
@@ -854,7 +854,7 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
@@ -863,7 +863,7 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
@@ -872,7 +872,7 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
@@ -881,7 +881,7 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
@@ -890,7 +890,7 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
@@ -899,7 +899,7 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
@@ -908,7 +908,7 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
@@ -917,7 +917,7 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
@@ -926,7 +926,7 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
@@ -935,7 +935,7 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
@@ -944,7 +944,7 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
@@ -953,7 +953,7 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
@@ -962,7 +962,7 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
@@ -971,7 +971,7 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
@@ -980,7 +980,7 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
@@ -989,7 +989,7 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
@@ -998,7 +998,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
@@ -1007,7 +1007,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
@@ -1016,7 +1016,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
@@ -1025,7 +1025,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
@@ -1034,7 +1034,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
@@ -1043,7 +1043,7 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
@@ -1052,7 +1052,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
@@ -1061,7 +1061,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
@@ -1070,7 +1070,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
@@ -1079,7 +1079,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
@@ -1088,7 +1088,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
@@ -1097,7 +1097,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
@@ -1106,7 +1106,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
@@ -1115,7 +1115,7 @@ vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
@@ -1124,7 +1124,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
@@ -1133,7 +1133,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
@@ -1142,7 +1142,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
@@ -1151,7 +1151,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
@@ -1160,7 +1160,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
@@ -1169,7 +1169,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
@@ -1178,7 +1178,7 @@ vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
@@ -1187,7 +1187,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
@@ -1196,7 +1196,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
@@ -1205,7 +1205,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
@@ -1214,7 +1214,7 @@ vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
@@ -1223,7 +1223,7 @@ vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
@@ -1232,7 +1232,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
@@ -1241,7 +1241,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
@@ -1250,7 +1250,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
@@ -1259,7 +1259,7 @@ vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
@@ -1268,7 +1268,7 @@ vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
@@ -1277,7 +1277,7 @@ vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
@@ -1286,7 +1286,7 @@ vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
@@ -1295,7 +1295,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
@@ -1304,7 +1304,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
@@ -1313,7 +1313,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
@@ -1322,7 +1322,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
@@ -1331,7 +1331,7 @@ vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
@@ -1340,7 +1340,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
@@ -1349,7 +1349,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
@@ -1358,7 +1358,7 @@ vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
@@ -1367,7 +1367,7 @@ vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
@@ -1376,7 +1376,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
@@ -1385,7 +1385,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
@@ -1394,7 +1394,7 @@ vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
@@ -1403,7 +1403,7 @@ vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
@@ -1412,7 +1412,7 @@ vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
@@ -1421,7 +1421,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
@@ -1430,7 +1430,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
@@ -1439,7 +1439,7 @@ vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
@@ -1448,7 +1448,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
@@ -1457,7 +1457,7 @@ vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
@@ -1466,7 +1466,7 @@ vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
@@ -1475,7 +1475,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
@@ -1484,7 +1484,7 @@ vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
@@ -1493,7 +1493,7 @@ vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
@@ -1502,7 +1502,7 @@ vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
@@ -1511,7 +1511,7 @@ vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
@@ -1520,7 +1520,7 @@ vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
@@ -1529,7 +1529,7 @@ vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
@@ -1538,7 +1538,7 @@ vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
@@ -1547,7 +1547,7 @@ vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
@@ -1556,7 +1556,7 @@ vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
@@ -1565,7 +1565,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
@@ -1574,7 +1574,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
@@ -1583,7 +1583,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
@@ -1592,7 +1592,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
@@ -1601,7 +1601,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
@@ -1610,7 +1610,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
@@ -1619,7 +1619,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
@@ -1628,7 +1628,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
@@ -1637,7 +1637,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
@@ -1646,7 +1646,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
@@ -1655,7 +1655,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
@@ -1664,7 +1664,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
@@ -1673,7 +1673,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
@@ -1682,7 +1682,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
@@ -1691,7 +1691,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
@@ -1700,7 +1700,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
@@ -1709,7 +1709,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
@@ -1718,7 +1718,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
@@ -1727,7 +1727,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
@@ -1736,7 +1736,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
@@ -1745,7 +1745,7 @@ vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
@@ -1754,7 +1754,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
@@ -1763,7 +1763,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
@@ -1772,7 +1772,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
@@ -1781,7 +1781,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
@@ -1790,7 +1790,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
@@ -1799,7 +1799,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
@@ -1808,7 +1808,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
@@ -1817,7 +1817,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
@@ -1826,7 +1826,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
@@ -1835,7 +1835,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
@@ -1844,7 +1844,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
@@ -1853,7 +1853,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
@@ -1862,7 +1862,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
@@ -1871,7 +1871,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
@@ -1880,7 +1880,7 @@ vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
@@ -1889,7 +1889,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
@@ -1898,7 +1898,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
@@ -1907,7 +1907,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ -1916,7 +1916,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1925,7 +1925,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1934,7 +1934,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1943,7 +1943,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1952,7 +1952,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1961,7 +1961,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1970,7 +1970,7 @@ vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1979,7 +1979,7 @@ vuint64m1_t 
test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1988,7 +1988,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1997,7 +1997,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2006,7 +2006,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2015,7 +2015,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2024,7 +2024,7 @@ vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2033,7 +2033,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2042,7 +2042,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2051,7 +2051,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2060,7 +2060,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2069,7 +2069,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2078,7 +2078,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2087,7 +2087,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2096,7 +2096,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2105,7 +2105,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ -2114,7 +2114,7 @@ vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2123,7 +2123,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2132,7 +2132,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2141,7 +2141,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2150,7 +2150,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2159,7 +2159,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { @@ -2168,7 +2168,7 @@ vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2 (vfloat16mf4_t op1) { @@ -2177,7 +2177,7 @@ vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2 (vfloat16mf4_t op1) { // CHECK-RV64-LABEL: 
@test_vlmul_ext_v_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1 (vfloat16mf4_t op1) { @@ -2186,7 +2186,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1 (vfloat16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2 (vfloat16mf4_t op1) { @@ -2195,7 +2195,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2 (vfloat16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4 (vfloat16mf4_t op1) { @@ -2204,7 +2204,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4 (vfloat16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8 (vfloat16mf4_t op1) { @@ -2213,7 +2213,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8 (vfloat16mf4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1 (vfloat16mf2_t op1) { @@ -2222,7 +2222,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1 (vfloat16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2 (vfloat16mf2_t op1) { @@ -2231,7 +2231,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2 (vfloat16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4 (vfloat16mf2_t op1) { @@ -2240,7 +2240,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4 (vfloat16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8 (vfloat16mf2_t op1) { @@ -2249,7 +2249,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8 (vfloat16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2 (vfloat16m1_t op1) { @@ -2258,7 +2258,7 @@ vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2 (vfloat16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4 (vfloat16m1_t op1) { @@ -2267,7 +2267,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4 (vfloat16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8 (vfloat16m1_t op1) { @@ -2276,7 +2276,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8 (vfloat16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4 (vfloat16m2_t op1) { @@ -2285,7 +2285,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4 (vfloat16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8 (vfloat16m2_t op1) { @@ -2294,7 +2294,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8 (vfloat16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8 (vfloat16m4_t op1) { @@ -2303,7 +2303,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8 (vfloat16m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4 (vfloat16mf2_t op1) { @@ -2312,7 +2312,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4 (vfloat16mf2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4 (vfloat16m1_t op1) { @@ -2321,7 +2321,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4 (vfloat16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2 (vfloat16m1_t op1) { @@ -2330,7 +2330,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2 (vfloat16m1_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4 (vfloat16m2_t op1) { @@ -2339,7 +2339,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4 (vfloat16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2 (vfloat16m2_t op1) { @@ -2348,7 +2348,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2 (vfloat16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1 (vfloat16m2_t op1) { @@ -2357,7 +2357,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1 (vfloat16m2_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4 (vfloat16m4_t op1) { @@ -2366,7 +2366,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4 (vfloat16m4_t op1) { // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2 (vfloat16m4_t 
op1) {
@@ -2375,7 +2375,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2 (vfloat16m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1 (vfloat16m4_t op1) {
@@ -2384,7 +2384,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1 (vfloat16m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2 (vfloat16m4_t op1) {
@@ -2393,7 +2393,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2 (vfloat16m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4 (vfloat16m8_t op1) {
@@ -2402,7 +2402,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4 (vfloat16m8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2 (vfloat16m8_t op1) {
@@ -2411,7 +2411,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2 (vfloat16m8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1 (vfloat16m8_t op1) {
@@ -2420,7 +2420,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1 (vfloat16m8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) {
@@ -2429,7 +2429,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4 (vfloat16m8_t op1) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
index ad402a31f0c453f54e8c43b01d2ce4eeb1a48d19..d2b17dac29dad1a3c18d488a4bbbf1d23b52742a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
@@ -17,7 +17,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
@@ -26,7 +26,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
@@ -35,7 +35,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
@@ -44,7 +44,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
@@ -53,7 +53,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
@@ -62,7 +62,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t
test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) { @@ -71,7 +71,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) { @@ -80,7 +80,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) { @@ -89,7 +89,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) { @@ -98,7 +98,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) { @@ -107,7 +107,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) { @@ -116,7 +116,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) { @@ -125,7 +125,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) { @@ -134,7 +134,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) { @@ -143,7 +143,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) { @@ -152,7 +152,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) { @@ -161,7 +161,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) { @@ -170,7 +170,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) { @@ -179,7 +179,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) { @@ -188,7 +188,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) { @@ -197,7 +197,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) { @@ -206,7 +206,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) { @@ -215,7 +215,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) { @@ -224,7 +224,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) { @@ -233,7 +233,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) { @@ -242,7 +242,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) { @@ -251,7 +251,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) { @@ -260,7 +260,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) { @@ -269,7 +269,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) { @@ -278,7 +278,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) { @@ -287,7 +287,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) { @@ -296,7 +296,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) { @@ -305,7 +305,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) { @@ -314,7 +314,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) { @@ -323,7 +323,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) { @@ -332,7 +332,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) { @@ -341,7 +341,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) { @@ -350,7 +350,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) { @@ -359,7 +359,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) { @@ -368,7 +368,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) { @@ -377,7 +377,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) { // CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( 
[[DEST:%.*]], [[VAL:%.*]], i64 8)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
@@ -386,7 +386,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
@@ -395,7 +395,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
@@ -404,7 +404,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
@@ -413,7 +413,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
@@ -422,7 +422,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
@@ -431,7 +431,7 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
@@ -440,7 +440,7 @@ vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 2)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
@@ -449,7 +449,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 6)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 6)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
@@ -458,7 +458,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 4)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
@@ -467,7 +467,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 14)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 14)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
@@ -476,7 +476,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 8)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 8)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
@@ -485,7 +485,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 8)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 8)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
@@ -494,7 +494,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
@@ -503,7 +503,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
@@ -512,7 +512,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 2)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
@@ -521,7 +521,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 7)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 7)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
@@ -530,7 +530,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
@@ -539,7 +539,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 4)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
@@ -548,7 +548,7 @@ vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) {
@@ -557,7 +557,7 @@ vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) {
@@ -566,7 +566,7 @@ vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) {
@@ -575,7 +575,7 @@ vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) {
@@ -584,7 +584,7 @@ vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) {
@@ -593,7 +593,7 @@ vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) {
 // CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv16f16( [[DEST:%.*]], [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m4_f16m8 (vfloat16m8_t dest, vfloat16m4_t val) {
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
index 42570a10a2d1c3f4963e8ec8be6fd3ac97a7ffc4..dc8216d74ec8f37545b187710f18c22e31327e2b 100644
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -53,9 +53,9 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK128-LABEL: define{{.*}} <16 x i8> @f2(<16 x i8> noundef %x)
 // CHECK128-NEXT: entry:
 // CHECK128-NEXT: [[TMP0:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0)
+// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0)
 // CHECK128-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.asrd.nxv16i8( [[TMP0]], [[CASTSCALABLESVE]], i32 1)
-// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[TMP1]], i64 0)
+// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[TMP1]], i64 0)
 // CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]]
 // CHECK-LABEL: define{{.*}} void @f2(
@@ -63,9 +63,9 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
 //
CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8( undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8( undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = call @llvm.aarch64.sve.asrd.nxv16i8( [[TMP1]], [[CASTSCALABLESVE]], i32 1) -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[TMP2]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]] // CHECK-NEXT: ret void vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); } @@ -80,14 +80,14 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N))); // CHECK128-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK128-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @f3(<16 x i8> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECK-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK-NEXT: entry: // CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16 -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[X_COERCE:%.*]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]] // CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECK-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp index 7e606e5f9e42033cfa5a0e8ce7645bd980b2f921..dbf5eedf04edf122f5668d90c5718f733310f50d 100644 --- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp +++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp @@ -49,10 +49,10 @@ void test02() { // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EES_( noundef %x.coerce, noundef %y.coerce) // CHECK-NEXT: entry: -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE1:%.*]], i64 0) +// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE1:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32( undef, <[[#div(VBITS, 32)]] x i32> 
[[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32( undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] typedef svint32_t vec __attribute__((arm_sve_vector_bits(N))); auto f(vec x, vec y) { return x + y; } // Returns a vec. @@ -68,11 +68,11 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N))); // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EE( noundef %x.coerce) // CHECK-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16 -// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16( [[X_COERCE:%.*]], i64 0) // CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], <[[#div(VBITS, 16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]] // CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(<[[#div(VBITS, 16)]] x i16>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECKWIDE-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c index 6c3eb185d5573de212a8f7028ebaa9d7056e13e3..fb733bd95223c2761cad687b3259d7b1dc3a8adf 100644 --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c @@ -39,7 +39,7 @@ svbfloat16_t test_svdupq_lane_bf16(svbfloat16_t data, uint64_t index) { // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -53,7 +53,7 @@ svbfloat16_t test_svdupq_lane_bf16(svbfloat16_t data, uint64_t index) { // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c index 
f7352af1614e2f55dcdc5ec0141477a51a1c33ea..3bc97c03cba1f342cdf45fe9e18802f8fcc82b96 100644 --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c @@ -197,7 +197,7 @@ svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index) // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CHECK-NEXT: ret [[TMP17]] // @@ -219,7 +219,7 @@ svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index) // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CPP-CHECK-NEXT: ret [[TMP17]] // @@ -242,7 +242,7 @@ svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -256,7 +256,7 @@ svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -273,7 +273,7 @@ svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CHECK-NEXT: 
[[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -283,7 +283,7 @@ svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ -297,7 +297,7 @@ svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CHECK-NEXT: ret [[TMP3]] // @@ -305,7 +305,7 @@ svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -332,7 +332,7 @@ svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1) // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CHECK-NEXT: ret [[TMP17]] // @@ -354,7 +354,7 @@ svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1) // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP17:%.*]] = call 
@llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CPP-CHECK-NEXT: ret [[TMP17]] // @@ -377,7 +377,7 @@ svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -391,7 +391,7 @@ svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -408,7 +408,7 @@ svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -418,7 +418,7 @@ svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ -432,7 +432,7 @@ svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // 
CHECK-NEXT: ret [[TMP3]] // @@ -440,7 +440,7 @@ svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -459,7 +459,7 @@ svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1) // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -473,7 +473,7 @@ svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1) // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -490,7 +490,7 @@ svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_ // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -500,7 +500,7 @@ svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_ // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ 
-514,7 +514,7 @@ svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP2]], i64 0) // CHECK-NEXT: ret [[TMP3]] // @@ -522,7 +522,7 @@ svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_ // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -566,7 +566,7 @@ svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1) // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0) // CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer) // CHECK-NEXT: ret [[TMP19]] @@ -606,7 +606,7 @@ svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1) // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0) // CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer) // CPP-CHECK-NEXT: ret [[TMP19]] @@ -639,7 +639,7 @@ svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3, // CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) -// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP18:%.*]] = call 
@llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0) // CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer) // CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]]) @@ -664,7 +664,7 @@ svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3, // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) -// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0) // CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]]) @@ -688,7 +688,7 @@ svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3, // CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3 // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) -// CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP9:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0) // CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], zeroinitializer) // CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]]) @@ -705,7 +705,7 @@ svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3, // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3 // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) -// CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0) // CPP-CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]]) @@ -724,7 +724,7 @@ svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3) // CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1 // CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) -// CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0) // CHECK-NEXT: [[TMP7:%.*]] = call 
@llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer) // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]]) @@ -737,7 +737,7 @@ svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3) // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1 // CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) -// CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0) // CPP-CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]]) diff --git a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c index 38b1f970fdc0d1f15b53928746b5c4b3c38ef395..3b0451fa16e43bd719f00c78a237bb412473fe41 100644 --- a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c @@ -29,10 +29,10 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N))); // CHECK-LABEL: @add_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) { @@ -41,10 +41,10 @@ fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @add_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) { @@ -53,10 +53,10 @@ fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @add_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = 
call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) { @@ -65,10 +65,10 @@ fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @add_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) { @@ -77,10 +77,10 @@ fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @add_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -89,10 +89,10 @@ fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @add_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret 
[[CASTSCALABLESVE]] // fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -101,10 +101,10 @@ fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @add_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -113,10 +113,10 @@ fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @add_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -125,13 +125,13 @@ fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @add_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) { @@ -140,10 +140,10 @@ fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @add_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] 
= call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) { @@ -152,10 +152,10 @@ fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @add_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) { @@ -164,10 +164,10 @@ fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) { // CHECK-LABEL: @add_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -176,10 +176,10 @@ fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @add_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -188,10 +188,10 @@ fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @add_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -200,10 +200,10 @@ fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @add_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t add_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -212,10 +212,10 @@ fixed_int64_t add_inplace_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @add_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -224,10 +224,10 @@ fixed_uint8_t add_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @add_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t add_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -236,10 +236,10 @@ fixed_uint16_t add_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @add_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -248,10 +248,10 @@ fixed_uint32_t add_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @add_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -260,13 +260,13 @@ fixed_uint64_t add_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @add_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV2]], [[CONV]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half> -// CHECK-NEXT: 
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x half> [[CASTSCALABLESVE]]
 //
 fixed_float16_t add_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -275,10 +275,10 @@ fixed_float16_t add_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
 
 // CHECK-LABEL: @add_inplace_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x float> [[CASTSCALABLESVE]]
 //
 fixed_float32_t add_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -287,10 +287,10 @@ fixed_float32_t add_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
 
 // CHECK-LABEL: @add_inplace_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 fixed_float64_t add_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -299,11 +299,11 @@ fixed_float64_t add_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
 
 // CHECK-LABEL: @add_scalar_i8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
 // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_int8_t add_scalar_i8(fixed_int8_t a, int8_t b) {
@@ -312,11 +312,11 @@ fixed_int8_t add_scalar_i8(fixed_int8_t a, int8_t b) {
 
 // CHECK-LABEL: @add_scalar_i16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
 //
 fixed_int16_t add_scalar_i16(fixed_int16_t a, int16_t b) {
@@ -325,11 +325,11 @@ fixed_int16_t add_scalar_i16(fixed_int16_t a, int16_t b) {
 
 // CHECK-LABEL: @add_scalar_i32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_int32_t add_scalar_i32(fixed_int32_t a, int32_t b) {
@@ -338,11 +338,11 @@ fixed_int32_t add_scalar_i32(fixed_int32_t a, int32_t b) {
 
 // CHECK-LABEL: @add_scalar_i64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_int64_t add_scalar_i64(fixed_int64_t a, int64_t b) {
@@ -351,11 +351,11 @@ fixed_int64_t add_scalar_i64(fixed_int64_t a, int64_t b) {
 
 // CHECK-LABEL: @add_scalar_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
 // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call
@llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -364,11 +364,11 @@ fixed_uint8_t add_scalar_u8(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @add_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t add_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -377,11 +377,11 @@ fixed_uint16_t add_scalar_u16(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @add_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -390,11 +390,11 @@ fixed_uint32_t add_scalar_u32(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @add_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -403,11 +403,11 @@ fixed_uint64_t add_scalar_u64(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @add_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( 
[[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t add_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -416,11 +416,11 @@ fixed_float16_t add_scalar_f16(fixed_float16_t a, __fp16 b) { // CHECK-LABEL: @add_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t add_scalar_f32(fixed_float32_t a, float b) { @@ -429,11 +429,11 @@ fixed_float32_t add_scalar_f32(fixed_float32_t a, float b) { // CHECK-LABEL: @add_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t add_scalar_f64(fixed_float64_t a, double b) { @@ -444,10 +444,10 @@ fixed_float64_t add_scalar_f64(fixed_float64_t a, double b) { // CHECK-LABEL: @sub_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_i8(fixed_int8_t a, fixed_int8_t b) { @@ -456,10 +456,10 @@ fixed_int8_t sub_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @sub_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_i16(fixed_int16_t a, fixed_int16_t b) { @@ -468,10 +468,10 @@ fixed_int16_t sub_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @sub_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_i32(fixed_int32_t a, fixed_int32_t b) { @@ -480,10 +480,10 @@ fixed_int32_t sub_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @sub_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_i64(fixed_int64_t a, fixed_int64_t b) { @@ -492,10 +492,10 @@ fixed_int64_t sub_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @sub_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -504,10 +504,10 @@ fixed_uint8_t sub_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @sub_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -516,10 +516,10 @@ fixed_uint16_t sub_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @sub_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -528,10 +528,10 @@ fixed_uint32_t sub_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @sub_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret 
[[CASTSCALABLESVE]] // fixed_uint64_t sub_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -540,13 +540,13 @@ fixed_uint64_t sub_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @sub_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_f16(fixed_float16_t a, fixed_float16_t b) { @@ -555,10 +555,10 @@ fixed_float16_t sub_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @sub_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_f32(fixed_float32_t a, fixed_float32_t b) { @@ -567,10 +567,10 @@ fixed_float32_t sub_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @sub_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_f64(fixed_float64_t a, fixed_float64_t b) { @@ -579,10 +579,10 @@ fixed_float64_t sub_f64(fixed_float64_t a, fixed_float64_t b) { // CHECK-LABEL: @sub_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -591,10 +591,10 @@ fixed_int8_t sub_inplace_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @sub_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -603,10 +603,10 @@ fixed_int16_t sub_inplace_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @sub_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -615,10 +615,10 @@ fixed_int32_t sub_inplace_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @sub_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, 
<8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -627,10 +627,10 @@ fixed_int64_t sub_inplace_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @sub_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -639,10 +639,10 @@ fixed_uint8_t sub_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @sub_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -651,10 +651,10 @@ fixed_uint16_t sub_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @sub_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -663,10 +663,10 @@ fixed_uint32_t sub_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @sub_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = 
call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t sub_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -675,13 +675,13 @@ fixed_uint64_t sub_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @sub_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -690,10 +690,10 @@ fixed_float16_t sub_inplace_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @sub_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_inplace_f32(fixed_float32_t a, fixed_float32_t b) { @@ -702,10 +702,10 @@ fixed_float32_t sub_inplace_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @sub_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> 
@llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -714,11 +714,11 @@ fixed_float64_t sub_inplace_f64(fixed_float64_t a, fixed_float64_t b) { // CHECK-LABEL: @sub_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_scalar_i8(fixed_int8_t a, int8_t b) { @@ -727,11 +727,11 @@ fixed_int8_t sub_scalar_i8(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @sub_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_scalar_i16(fixed_int16_t a, int16_t b) { @@ -740,11 +740,11 @@ fixed_int16_t sub_scalar_i16(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @sub_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_scalar_i32(fixed_int32_t a, int32_t b) { @@ -753,11 +753,11 @@ fixed_int32_t sub_scalar_i32(fixed_int32_t a, 
int32_t b) { // CHECK-LABEL: @sub_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_scalar_i64(fixed_int64_t a, int64_t b) { @@ -766,11 +766,11 @@ fixed_int64_t sub_scalar_i64(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @sub_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -779,11 +779,11 @@ fixed_uint8_t sub_scalar_u8(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @sub_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -792,11 +792,11 @@ fixed_uint16_t sub_scalar_u16(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @sub_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> 
[[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -805,11 +805,11 @@ fixed_uint32_t sub_scalar_u32(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @sub_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t sub_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -818,11 +818,11 @@ fixed_uint64_t sub_scalar_u64(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @sub_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -831,11 +831,11 @@ fixed_float16_t sub_scalar_f16(fixed_float16_t a, __fp16 b) { // CHECK-LABEL: @sub_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_scalar_f32(fixed_float32_t a, float b) { @@ -844,11 +844,11 @@ fixed_float32_t sub_scalar_f32(fixed_float32_t a, float b) { // CHECK-LABEL: @sub_scalar_f64( // CHECK-NEXT: 
entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_scalar_f64(fixed_float64_t a, double b) { @@ -859,10 +859,10 @@ fixed_float64_t sub_scalar_f64(fixed_float64_t a, double b) { // CHECK-LABEL: @mul_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t mul_i8(fixed_int8_t a, fixed_int8_t b) { @@ -871,10 +871,10 @@ fixed_int8_t mul_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @mul_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t mul_i16(fixed_int16_t a, fixed_int16_t b) { @@ -883,10 +883,10 @@ fixed_int16_t mul_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @mul_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t mul_i32(fixed_int32_t a, fixed_int32_t b) { @@ -895,10 +895,10 @@ fixed_int32_t mul_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @mul_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t mul_i64(fixed_int64_t a, fixed_int64_t b) { @@ -907,10 +907,10 @@ fixed_int64_t mul_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @mul_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t mul_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -919,10 +919,10 @@ fixed_uint8_t mul_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @mul_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t mul_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -931,10 +931,10 @@ fixed_uint16_t mul_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @mul_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> 
@llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_uint32_t mul_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -943,10 +943,10 @@ fixed_uint32_t mul_u32(fixed_uint32_t a, fixed_uint32_t b) {
 
 // CHECK-LABEL: @mul_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_uint64_t mul_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -955,13 +955,13 @@ fixed_uint64_t mul_u64(fixed_uint64_t a, fixed_uint64_t b) {
 
 // CHECK-LABEL: @mul_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
 // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
 // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]]
 // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x half> [[CASTSCALABLESVE]]
 //
 fixed_float16_t mul_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -970,10 +970,10 @@ fixed_float16_t mul_f16(fixed_float16_t a, fixed_float16_t b) {
 
 // CHECK-LABEL: @mul_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x float> [[CASTSCALABLESVE]]
 //
 fixed_float32_t mul_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -982,10 +982,10 @@ fixed_float32_t mul_f32(fixed_float32_t a, fixed_float32_t b) {
 
 // CHECK-LABEL: @mul_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 fixed_float64_t mul_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -994,10 +994,10 @@ fixed_float64_t mul_f64(fixed_float64_t a, fixed_float64_t b) {
 
 // CHECK-LABEL: @mul_inplace_i8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_int8_t mul_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -1006,10 +1006,10 @@ fixed_int8_t mul_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
 
 // CHECK-LABEL: @mul_inplace_i16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
 //
 fixed_int16_t mul_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -1018,10 +1018,10 @@ fixed_int16_t mul_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
 
 // CHECK-LABEL: @mul_inplace_i32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_int32_t mul_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -1030,10 +1030,10 @@ fixed_int32_t mul_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
 
 // CHECK-LABEL: @mul_inplace_i64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_int64_t mul_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -1042,10 +1042,10 @@ fixed_int64_t mul_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
 
 // CHECK-LABEL: @mul_inplace_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_uint8_t mul_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -1054,10 +1054,10 @@ fixed_uint8_t mul_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
 
 // CHECK-LABEL: @mul_inplace_u16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
 //
 fixed_uint16_t mul_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -1066,10 +1066,10 @@ fixed_uint16_t mul_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
 
 // CHECK-LABEL: @mul_inplace_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_uint32_t mul_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -1078,10 +1078,10 @@ fixed_uint32_t mul_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
 
 // CHECK-LABEL: @mul_inplace_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_uint64_t mul_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -1090,13 +1090,13 @@ fixed_uint64_t mul_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
 
 // CHECK-LABEL: @mul_inplace_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
 // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
 // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]]
 // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x half> [[CASTSCALABLESVE]]
 //
 fixed_float16_t mul_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -1105,10 +1105,10 @@ fixed_float16_t mul_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
 
 // CHECK-LABEL: @mul_inplace_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x float> [[CASTSCALABLESVE]]
 //
 fixed_float32_t mul_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -1117,10 +1117,10 @@ fixed_float32_t mul_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
 
 // CHECK-LABEL: @mul_inplace_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 fixed_float64_t mul_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -1129,11 +1129,11 @@ fixed_float64_t mul_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
 
 // CHECK-LABEL: @mul_scalar_i8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_int8_t mul_scalar_i8(fixed_int8_t a, int8_t b) {
@@ -1142,11 +1142,11 @@ fixed_int8_t mul_scalar_i8(fixed_int8_t a, int8_t b) {
 
 // CHECK-LABEL: @mul_scalar_i16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
 //
 fixed_int16_t mul_scalar_i16(fixed_int16_t a, int16_t b) {
@@ -1155,11 +1155,11 @@ fixed_int16_t mul_scalar_i16(fixed_int16_t a, int16_t b) {
 
 // CHECK-LABEL: @mul_scalar_i32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_int32_t mul_scalar_i32(fixed_int32_t a, int32_t b) {
@@ -1168,11 +1168,11 @@ fixed_int32_t mul_scalar_i32(fixed_int32_t a, int32_t b) {
 
 // CHECK-LABEL: @mul_scalar_i64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_int64_t mul_scalar_i64(fixed_int64_t a, int64_t b) {
@@ -1181,11 +1181,11 @@ fixed_int64_t mul_scalar_i64(fixed_int64_t a, int64_t b) {
 
 // CHECK-LABEL: @mul_scalar_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_uint8_t mul_scalar_u8(fixed_uint8_t a, uint8_t b) {
@@ -1194,11 +1194,11 @@ fixed_uint8_t mul_scalar_u8(fixed_uint8_t a, uint8_t b) {
 
 // CHECK-LABEL: @mul_scalar_u16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
 //
 fixed_uint16_t mul_scalar_u16(fixed_uint16_t a, uint16_t b) {
@@ -1207,11 +1207,11 @@ fixed_uint16_t mul_scalar_u16(fixed_uint16_t a, uint16_t b) {
 
 // CHECK-LABEL: @mul_scalar_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 fixed_uint32_t mul_scalar_u32(fixed_uint32_t a, uint32_t b) {
@@ -1220,11 +1220,11 @@ fixed_uint32_t mul_scalar_u32(fixed_uint32_t a, uint32_t b) {
 
 // CHECK-LABEL: @mul_scalar_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 fixed_uint64_t mul_scalar_u64(fixed_uint64_t a, uint64_t b) {
@@ -1233,11 +1233,11 @@ fixed_uint64_t mul_scalar_u64(fixed_uint64_t a, uint64_t b) {
 
 // CHECK-LABEL: @mul_scalar_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x half> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 8 x half> [[CASTSCALABLESVE]]
 //
 fixed_float16_t mul_scalar_f16(fixed_float16_t a, __fp16 b) {
@@ -1246,11 +1246,11 @@ fixed_float16_t mul_scalar_f16(fixed_float16_t a, __fp16 b) {
 
 // CHECK-LABEL: @mul_scalar_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x float> [[CASTSCALABLESVE]]
 //
 fixed_float32_t mul_scalar_f32(fixed_float32_t a, float b) {
@@ -1259,11 +1259,11 @@ fixed_float32_t mul_scalar_f32(fixed_float32_t a, float b) {
 
 // CHECK-LABEL: @mul_scalar_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0
 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 fixed_float64_t mul_scalar_f64(fixed_float64_t a, double b) {
@@ -1274,10 +1274,10 @@ fixed_float64_t mul_scalar_f64(fixed_float64_t a, double b) {
 
 // CHECK-LABEL: @div_i8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
 //
 fixed_int8_t div_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -1286,10 +1286,10 @@ fixed_int8_t div_i8(fixed_int8_t a, fixed_int8_t b) {
 
 // CHECK-LABEL: @div_i16(
 //
CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1298,10 +1298,10 @@ fixed_int16_t div_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @div_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1310,10 +1310,10 @@ fixed_int32_t div_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @div_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1322,10 +1322,10 @@ fixed_int64_t div_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @div_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( 
undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1334,10 +1334,10 @@ fixed_uint8_t div_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @div_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1346,10 +1346,10 @@ fixed_uint16_t div_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @div_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1358,10 +1358,10 @@ fixed_uint32_t div_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @div_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1370,13 +1370,13 @@ fixed_uint64_t div_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @div_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( 
[[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_f16(fixed_float16_t a, fixed_float16_t b) { @@ -1385,10 +1385,10 @@ fixed_float16_t div_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @div_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_f32(fixed_float32_t a, fixed_float32_t b) { @@ -1397,10 +1397,10 @@ fixed_float32_t div_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @div_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t div_f64(fixed_float64_t a, fixed_float64_t b) { @@ -1409,10 +1409,10 @@ fixed_float64_t div_f64(fixed_float64_t a, fixed_float64_t b) { // CHECK-LABEL: @div_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t div_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1421,10 +1421,10 @@ fixed_int8_t div_inplace_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @div_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1433,10 +1433,10 @@ fixed_int16_t div_inplace_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @div_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1445,10 +1445,10 @@ fixed_int32_t div_inplace_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @div_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1457,10 +1457,10 @@ fixed_int64_t div_inplace_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @div_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1469,10 +1469,10 @@ fixed_uint8_t div_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @div_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1481,10 +1481,10 @@ fixed_uint16_t div_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @div_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1493,10 +1493,10 @@ fixed_uint32_t div_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @div_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1505,13 +1505,13 @@ fixed_uint64_t div_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @div_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -1520,10 +1520,10 @@ fixed_float16_t div_inplace_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @div_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_inplace_f32(fixed_float32_t a, fixed_float32_t b) { @@ -1532,10 +1532,10 @@ fixed_float32_t div_inplace_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @div_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // 
fixed_float64_t div_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -1544,11 +1544,11 @@ fixed_float64_t div_inplace_f64(fixed_float64_t a, fixed_float64_t b) { // CHECK-LABEL: @div_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t div_scalar_i8(fixed_int8_t a, int8_t b) { @@ -1557,11 +1557,11 @@ fixed_int8_t div_scalar_i8(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @div_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_scalar_i16(fixed_int16_t a, int16_t b) { @@ -1570,11 +1570,11 @@ fixed_int16_t div_scalar_i16(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @div_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_scalar_i32(fixed_int32_t a, int32_t b) { @@ -1583,11 +1583,11 @@ fixed_int32_t div_scalar_i32(fixed_int32_t a, int32_t b) { // CHECK-LABEL: @div_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: 
[[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_scalar_i64(fixed_int64_t a, int64_t b) { @@ -1596,11 +1596,11 @@ fixed_int64_t div_scalar_i64(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @div_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -1609,11 +1609,11 @@ fixed_uint8_t div_scalar_u8(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @div_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -1622,11 +1622,11 @@ fixed_uint16_t div_scalar_u16(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @div_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -1635,11 
+1635,11 @@ fixed_uint32_t div_scalar_u32(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @div_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -1648,11 +1648,11 @@ fixed_uint64_t div_scalar_u64(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @div_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -1661,11 +1661,11 @@ fixed_float16_t div_scalar_f16(fixed_float16_t a, __fp16 b) { // CHECK-LABEL: @div_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_scalar_f32(fixed_float32_t a, float b) { @@ -1674,11 +1674,11 @@ fixed_float32_t div_scalar_f32(fixed_float32_t a, float b) { // CHECK-LABEL: @div_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = 
shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_float64_t div_scalar_f64(fixed_float64_t a, double b) {
@@ -1689,10 +1689,10 @@ fixed_float64_t div_scalar_f64(fixed_float64_t a, double b) {
// CHECK-LABEL: @rem_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t rem_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -1701,10 +1701,10 @@ fixed_int8_t rem_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @rem_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t rem_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -1713,10 +1713,10 @@ fixed_int16_t rem_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @rem_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t rem_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -1725,10 +1725,10 @@ fixed_int32_t rem_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @rem_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t rem_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -1737,10 +1737,10 @@ fixed_int64_t rem_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @rem_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -1749,10 +1749,10 @@ fixed_uint8_t rem_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @rem_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -1761,10 +1761,10 @@ fixed_uint16_t rem_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @rem_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -1773,10 +1773,10 @@ fixed_uint32_t rem_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @rem_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -1785,10 +1785,10 @@ fixed_uint64_t rem_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @rem_inplace_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t rem_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -1797,10 +1797,10 @@ fixed_int8_t rem_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @rem_inplace_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t rem_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -1809,10 +1809,10 @@ fixed_int16_t rem_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @rem_inplace_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t rem_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -1821,10 +1821,10 @@ fixed_int32_t rem_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @rem_inplace_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t rem_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -1833,10 +1833,10 @@ fixed_int64_t rem_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @rem_inplace_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -1845,10 +1845,10 @@ fixed_uint8_t rem_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @rem_inplace_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -1857,10 +1857,10 @@ fixed_uint16_t rem_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @rem_inplace_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -1869,10 +1869,10 @@ fixed_uint32_t rem_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @rem_inplace_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -1881,11 +1881,11 @@ fixed_uint64_t rem_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @rem_scalar_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t rem_scalar_i8(fixed_int8_t a, int8_t b) {
@@ -1894,11 +1894,11 @@ fixed_int8_t rem_scalar_i8(fixed_int8_t a, int8_t b) {
// CHECK-LABEL: @rem_scalar_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t rem_scalar_i16(fixed_int16_t a, int16_t b) {
@@ -1907,11 +1907,11 @@ fixed_int16_t rem_scalar_i16(fixed_int16_t a, int16_t b) {
// CHECK-LABEL: @rem_scalar_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t rem_scalar_i32(fixed_int32_t a, int32_t b) {
@@ -1920,11 +1920,11 @@ fixed_int32_t rem_scalar_i32(fixed_int32_t a, int32_t b) {
// CHECK-LABEL: @rem_scalar_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t rem_scalar_i64(fixed_int64_t a, int64_t b) {
@@ -1933,11 +1933,11 @@ fixed_int64_t rem_scalar_i64(fixed_int64_t a, int64_t b) {
// CHECK-LABEL: @rem_scalar_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_scalar_u8(fixed_uint8_t a, uint8_t b) {
@@ -1946,11 +1946,11 @@ fixed_uint8_t rem_scalar_u8(fixed_uint8_t a, uint8_t b) {
// CHECK-LABEL: @rem_scalar_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_scalar_u16(fixed_uint16_t a, uint16_t b) {
@@ -1959,11 +1959,11 @@ fixed_uint16_t rem_scalar_u16(fixed_uint16_t a, uint16_t b) {
// CHECK-LABEL: @rem_scalar_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_scalar_u32(fixed_uint32_t a, uint32_t b) {
@@ -1972,11 +1972,11 @@ fixed_uint32_t rem_scalar_u32(fixed_uint32_t a, uint32_t b) {
// CHECK-LABEL: @rem_scalar_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_scalar_u64(fixed_uint64_t a, uint64_t b) {
diff --git a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
index b77d2bd63c3bd92c066c778e4fe0f0f5f3d49cd8..46e4790477c4f8022077bccf24cc34e851a97021 100644
--- a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
+++ b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
@@ -30,11 +30,11 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
// CHECK-LABEL: @and_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[AND]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -44,10 +44,10 @@ fixed_bool_t and_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @and_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -56,10 +56,10 @@ fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @and_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -68,10 +68,10 @@ fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @and_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -80,10 +80,10 @@ fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @and_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -92,10 +92,10 @@ fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @and_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -104,10 +104,10 @@ fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @and_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -116,10 +116,10 @@ fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @and_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -128,10 +128,10 @@ fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @and_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -143,11 +143,11 @@ fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @or_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[OR]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -157,10 +157,10 @@ fixed_bool_t or_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @or_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -169,10 +169,10 @@ fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @or_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -181,10 +181,10 @@ fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @or_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -193,10 +193,10 @@ fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @or_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -205,10 +205,10 @@ fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @or_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -217,10 +217,10 @@ fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @or_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -229,10 +229,10 @@ fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @or_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -241,10 +241,10 @@ fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @or_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -256,11 +256,11 @@ fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @xor_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[XOR]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -270,10 +270,10 @@ fixed_bool_t xor_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @xor_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -282,10 +282,10 @@ fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @xor_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -294,10 +294,10 @@ fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @xor_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -306,10 +306,10 @@ fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @xor_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -318,10 +318,10 @@ fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @xor_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -330,10 +330,10 @@ fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @xor_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -342,10 +342,10 @@ fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @xor_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -354,10 +354,10 @@ fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @xor_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -369,9 +369,9 @@ fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @neg_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i8> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[NEG]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP1]]
//
@@ -381,9 +381,9 @@ fixed_bool_t neg_bool(fixed_bool_t a) {
// CHECK-LABEL: @neg_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t neg_i8(fixed_int8_t a) {
@@ -392,9 +392,9 @@ fixed_int8_t neg_i8(fixed_int8_t a) {
// CHECK-LABEL: @neg_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t neg_i16(fixed_int16_t a) {
@@ -403,9 +403,9 @@ fixed_int16_t neg_i16(fixed_int16_t a) {
// CHECK-LABEL: @neg_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t neg_i32(fixed_int32_t a) {
@@ -414,9 +414,9 @@ fixed_int32_t neg_i32(fixed_int32_t a) {
// CHECK-LABEL: @neg_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t neg_i64(fixed_int64_t a) {
@@ -425,9 +425,9 @@ fixed_int64_t neg_i64(fixed_int64_t a) {
// CHECK-LABEL: @neg_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t neg_u8(fixed_uint8_t a) {
@@ -436,9 +436,9 @@ fixed_uint8_t neg_u8(fixed_uint8_t a) {
// CHECK-LABEL: @neg_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint16_t neg_u16(fixed_uint16_t a) {
@@ -447,9 +447,9 @@ fixed_uint16_t neg_u16(fixed_uint16_t a) {
// CHECK-LABEL: @neg_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint32_t neg_u32(fixed_uint32_t a) {
@@ -458,9 +458,9 @@ fixed_uint32_t neg_u32(fixed_uint32_t a) {
// CHECK-LABEL: @neg_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]],
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t neg_u64(fixed_uint64_t a) {
diff --git a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c
index f6c6672336dde2539ba6563114e7c3a25306cecd..3b953fa123ed386de31a8b3f6a02e33e5ed8a4a7 100644
--- a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c
+++ b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c
@@ -30,12 +30,12 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
// CHECK-LABEL: @eq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -45,11 +45,11 @@ fixed_bool_t eq_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @eq_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t eq_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -58,11 +58,11 @@ fixed_int8_t eq_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @eq_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t eq_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -71,11 +71,11 @@ fixed_int16_t eq_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @eq_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t eq_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -84,11 +84,11 @@ fixed_int32_t eq_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @eq_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t eq_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -97,11 +97,11 @@ fixed_int64_t eq_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @eq_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t eq_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -110,11 +110,11 @@ fixed_int8_t eq_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @eq_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t eq_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -123,11 +123,11 @@ fixed_int16_t eq_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @eq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t eq_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -136,11 +136,11 @@ fixed_int32_t eq_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @eq_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t eq_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -149,14 +149,14 @@ fixed_int64_t eq_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @eq_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32>
// CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t eq_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -165,11 +165,11 @@ fixed_int16_t eq_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-LABEL: @eq_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <16 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t eq_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -178,11 +178,11 @@ fixed_int32_t eq_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-LABEL: @eq_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <8 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t eq_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -194,12 +194,12 @@ fixed_int64_t eq_f64(fixed_float64_t a, fixed_float64_t
// CHECK-LABEL: @neq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -209,11 +209,11 @@ fixed_bool_t neq_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @neq_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t neq_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -222,11 +222,11 @@ fixed_int8_t neq_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @neq_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t neq_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -235,11 +235,11 @@ fixed_int16_t neq_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @neq_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t neq_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -248,11 +248,11 @@ fixed_int32_t neq_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @neq_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t neq_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -261,11 +261,11 @@ fixed_int64_t neq_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @neq_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t neq_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -274,11 +274,11 @@ fixed_int8_t neq_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @neq_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t neq_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -287,11 +287,11 @@ fixed_int16_t neq_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @neq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t neq_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -300,11 +300,11 @@ fixed_int32_t neq_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @neq_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t neq_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -313,14 +313,14 @@ fixed_int64_t neq_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @neq_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32>
// CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t neq_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -329,11 +329,11 @@ fixed_int16_t neq_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-LABEL: @neq_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <16 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t neq_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -342,11 +342,11 @@ fixed_int32_t neq_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-LABEL: @neq_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <8 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t neq_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -358,12 +358,12 @@ fixed_int64_t neq_f64(fixed_float64_t a, fixed_float64_t b) {
// CHECK-LABEL: @lt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -373,11 +373,11 @@ fixed_bool_t lt_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @lt_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t lt_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -386,11 +386,11 @@ fixed_int8_t lt_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @lt_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t lt_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -399,11 +399,11 @@ fixed_int16_t lt_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @lt_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t lt_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -412,11 +412,11 @@ fixed_int32_t lt_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @lt_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t lt_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -425,11 +425,11 @@ fixed_int64_t lt_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @lt_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t lt_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -438,11 +438,11 @@ fixed_int8_t lt_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @lt_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t lt_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -451,11 +451,11 @@ fixed_int16_t lt_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @lt_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t lt_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -464,11 +464,11 @@ fixed_int32_t lt_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @lt_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t lt_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -477,14 +477,14 @@ fixed_int64_t lt_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @lt_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32>
// CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t lt_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -493,11 +493,11 @@ fixed_int16_t lt_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-LABEL: @lt_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <16 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t lt_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -506,11 +506,11 @@ fixed_int32_t lt_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-LABEL: @lt_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t lt_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -522,12 +522,12 @@ fixed_int64_t lt_f64(fixed_float64_t a, fixed_float64_t b) {
// CHECK-LABEL: @leq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -537,11 +537,11 @@ fixed_bool_t leq_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @leq_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t leq_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -550,11 +550,11 @@ fixed_int8_t leq_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @leq_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t leq_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -563,11 +563,11 @@ fixed_int16_t leq_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @leq_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t leq_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -576,11 +576,11 @@ fixed_int32_t leq_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @leq_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t leq_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -589,11 +589,11 @@ fixed_int64_t leq_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @leq_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t leq_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -602,11 +602,11 @@ fixed_int8_t leq_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @leq_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t leq_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -615,11 +615,11 @@ fixed_int16_t leq_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @leq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t leq_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -628,11 +628,11 @@ fixed_int32_t leq_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @leq_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t leq_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -641,14 +641,14 @@ fixed_int64_t leq_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @leq_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32>
// CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t leq_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -657,11 +657,11 @@ fixed_int16_t leq_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-LABEL: @leq_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <16 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t leq_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -670,11 +670,11 @@ fixed_int32_t leq_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-LABEL: @leq_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <8 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t leq_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -686,12 +686,12 @@ fixed_int64_t leq_f64(fixed_float64_t a, fixed_float64_t b) {
// CHECK-LABEL: @gt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -701,11 +701,11 @@ fixed_bool_t gt_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @gt_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t gt_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -714,11 +714,11 @@ fixed_int8_t gt_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @gt_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t gt_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -727,11 +727,11 @@ fixed_int16_t gt_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @gt_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t gt_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -740,11 +740,11 @@ fixed_int32_t gt_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @gt_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t gt_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -753,11 +753,11 @@ fixed_int64_t gt_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @gt_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t gt_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -766,11 +766,11 @@ fixed_int8_t gt_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @gt_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t gt_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -779,11 +779,11 @@ fixed_int16_t gt_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @gt_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t gt_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -792,11 +792,11 @@ fixed_int32_t gt_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @gt_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t gt_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -805,14 +805,14 @@ fixed_int64_t gt_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @gt_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32>
// CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t gt_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -821,11 +821,11 @@ fixed_int16_t gt_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-LABEL: @gt_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <16 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t gt_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -834,11 +834,11 @@ fixed_int32_t gt_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-LABEL: @gt_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <8 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t gt_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -850,12 +850,12 @@ fixed_int64_t gt_f64(fixed_float64_t a, fixed_float64_t b) {
// CHECK-LABEL: @geq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -865,11 +865,11 @@ fixed_bool_t geq_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @geq_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <64 x i8> [[A]], [[B]]
icmp sge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t geq_i8(fixed_int8_t a, fixed_int8_t b) { @@ -878,11 +878,11 @@ fixed_int8_t geq_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @geq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t geq_i16(fixed_int16_t a, fixed_int16_t b) { @@ -891,11 +891,11 @@ fixed_int16_t geq_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @geq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t geq_i32(fixed_int32_t a, fixed_int32_t b) { @@ -904,11 +904,11 @@ fixed_int32_t geq_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @geq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t 
geq_i64(fixed_int64_t a, fixed_int64_t b) { @@ -917,11 +917,11 @@ fixed_int64_t geq_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @geq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t geq_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -930,11 +930,11 @@ fixed_int8_t geq_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @geq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t geq_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -943,11 +943,11 @@ fixed_int16_t geq_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @geq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t geq_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -956,11 +956,11 @@ fixed_int32_t geq_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @geq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64>
[[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t geq_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -969,14 +969,14 @@ fixed_int64_t geq_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @geq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t geq_f16(fixed_float16_t a, fixed_float16_t b) { @@ -985,11 +985,11 @@ fixed_int16_t geq_f16(fixed_float16_t a, fixed_float16_t b) { // CHECK-LABEL: @geq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t geq_f32(fixed_float32_t a, fixed_float32_t b) { @@ -998,11 +998,11 @@ fixed_int32_t geq_f32(fixed_float32_t a, fixed_float32_t b) { // CHECK-LABEL: @geq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0) +//
CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t geq_f64(fixed_float64_t a, fixed_float64_t b) { diff --git a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c index b431649cc7395f819b73b4a628740bd217ba981a..89a8eb169527acdc773c976ce9ee21c60f8e7c5b 100644 --- a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c @@ -27,10 +27,10 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N))); // CHECK-LABEL: @lshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) { @@ -39,10 +39,10 @@ fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @rshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) { @@ -51,10 +51,10 @@ fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) { // CHECK-LABEL: @lshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8>
@llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -63,10 +63,10 @@ fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @rshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -75,10 +75,10 @@ fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) { // CHECK-LABEL: @lshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) { @@ -87,10 +87,10 @@ fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @rshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) { @@ -99,10 +99,10 @@ fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) { // CHECK-LABEL: @lshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16>
@llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -111,10 +111,10 @@ fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @rshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -123,10 +123,10 @@ fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) { // CHECK-LABEL: @lshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) { @@ -135,10 +135,10 @@ fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @rshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16
x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) { @@ -147,10 +147,10 @@ fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) { // CHECK-LABEL: @lshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -159,10 +159,10 @@ fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @rshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -171,10 +171,10 @@ fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) { // CHECK-LABEL: @lshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) { @@ -183,10 +183,10 @@ fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @rshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64>
[[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) { @@ -195,10 +195,10 @@ fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) { // CHECK-LABEL: @lshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -207,10 +207,10 @@ fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @rshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -219,13 +219,13 @@ fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) { // CHECK-LABEL: @lshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8>
[[CASTSCALABLESVE]] // fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) { @@ -234,11 +234,11 @@ fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @lshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) { @@ -247,13 +247,13 @@ fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @rshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) { @@ -262,11 +262,11 @@ fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @rshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) { @@ -275,13 +275,13 @@ fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) { // CHECK-LABEL: @lshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) //
CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { @@ -290,11 +290,11 @@ fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @lshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { @@ -303,13 +303,13 @@ fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @rshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { @@ -318,11 +318,11 @@ fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @rshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[SPLAT_SPLAT]], [[A]] -//
CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { @@ -331,13 +331,13 @@ fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { // CHECK-LABEL: @lshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) { @@ -346,11 +346,11 @@ fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @lshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) { @@ -359,13 +359,13 @@ fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @rshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT:
ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) { @@ -374,11 +374,11 @@ fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @rshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) { @@ -387,13 +387,13 @@ fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) { // CHECK-LABEL: @lshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { @@ -402,11 +402,11 @@ fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @lshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { @@ -415,13 +415,13 @@ fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @rshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT:
[[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { @@ -430,11 +430,11 @@ fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @rshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { @@ -443,11 +443,11 @@ fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { // CHECK-LABEL: @lshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) { @@ -456,11 +456,11 @@ fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) { // CHECK-LABEL: @lshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32>
[[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) { @@ -469,11 +469,11 @@ fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) { // CHECK-LABEL: @rshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) { @@ -482,11 +482,11 @@ fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) { // CHECK-LABEL: @rshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) { @@ -495,11 +495,11 @@ fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) { // CHECK-LABEL: @lshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { @@ -508,11 +508,11 @@ fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @lshift_u32_lsplat( //
CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { @@ -521,11 +521,11 @@ fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @rshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { @@ -534,11 +534,11 @@ fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @rshift_u32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { @@ -547,11 +547,11 @@ fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { // CHECK-LABEL: @lshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] =
shl <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) { @@ -560,11 +560,11 @@ fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @lshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) { @@ -573,11 +573,11 @@ fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @rshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) { @@ -586,11 +586,11 @@ fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @rshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) { @@ -599,11 +599,11 @@ fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) { // CHECK-LABEL: @lshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]]
= call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { @@ -612,11 +612,11 @@ fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @lshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) { @@ -625,11 +625,11 @@ fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @rshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { @@ -638,11 +638,11 @@ fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { // CHECK-LABEL: @rshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64>
@llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64_lsplat(fixed_uint64_t a, uint64_t b) { diff --git a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c index 444fab942d782104f284cb156bedd1953b3251a7..5567a3ebdca0b1d87a5217368a73f5b88337b82c 100644 --- a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c @@ -28,7 +28,7 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N))); // CHECK-LABEL: @subscript_int16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -38,7 +38,7 @@ int16_t subscript_int16(fixed_int16_t a, size_t b) { // CHECK-LABEL: @subscript_uint16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -48,7 +48,7 @@ uint16_t subscript_uint16(fixed_uint16_t a, size_t b) { // CHECK-LABEL: @subscript_int32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i32 [[VECEXT]] // @@ -58,7 +58,7 @@ int32_t subscript_int32(fixed_int32_t a, size_t b) { // CHECK-LABEL: @subscript_uint32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i32 [[VECEXT]] // @@ -68,7 +68,7 @@ uint32_t subscript_uint32(fixed_uint32_t a, size_t b) { // CHECK-LABEL: @subscript_int64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -78,7 +78,7 @@ int64_t subscript_int64(fixed_int64_t a, size_t b) { // CHECK-LABEL: @subscript_uint64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -88,7 +88,7 @@ uint64_t subscript_uint64(fixed_uint64_t a, size_t b) { // CHECK-LABEL:
@subscript_float16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret half [[VECEXT]] // @@ -98,7 +98,7 @@ __fp16 subscript_float16(fixed_float16_t a, size_t b) { // CHECK-LABEL: @subscript_float32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret float [[VECEXT]] // @@ -108,7 +108,7 @@ float subscript_float32(fixed_float32_t a, size_t b) { // CHECK-LABEL: @subscript_float64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret double [[VECEXT]] // diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c index 8d7b811b4cfa20fafb1b07a2f3c65ba04bf896c9..e84beb0668a718cccade0d0235ea3dda26b0c029 100644 --- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c +++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c @@ -16,13 +16,13 @@ // CHECK-LABEL: @test_svdup_neonq_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z19test_svdup_neonq_s811__Int8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -32,13 +32,13 @@ svint8_t test_svdup_neonq_s8(int8x16_t n) { // CHECK-LABEL: @test_svdup_neonq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s1611__Int16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call 
@llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -48,13 +48,13 @@ svint16_t test_svdup_neonq_s16(int16x8_t n) { // CHECK-LABEL: @test_svdup_neonq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s3211__Int32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -64,13 +64,13 @@ svint32_t test_svdup_neonq_s32(int32x4_t n) { // CHECK-LABEL: @test_svdup_neonq_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s6411__Int64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -80,13 +80,13 @@ svint64_t test_svdup_neonq_s64(int64x2_t n) { // CHECK-LABEL: @test_svdup_neonq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z19test_svdup_neonq_u812__Uint8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -96,13 +96,13 @@ svuint8_t test_svdup_neonq_u8(uint8x16_t n) { // CHECK-LABEL: @test_svdup_neonq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u1612__Uint16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: 
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -112,13 +112,13 @@ svuint16_t test_svdup_neonq_u16(uint16x8_t n) {
 // CHECK-LABEL: @test_svdup_neonq_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u3212__Uint32x4_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -128,13 +128,13 @@ svuint32_t test_svdup_neonq_u32(uint32x4_t n) {
 // CHECK-LABEL: @test_svdup_neonq_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u6412__Uint64x2_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -144,13 +144,13 @@ svuint64_t test_svdup_neonq_u64(uint64x2_t n) {
 // CHECK-LABEL: @test_svdup_neonq_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f1613__Float16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -158,18 +158,18 @@ svfloat16_t test_svdup_neonq_f16(float16x8_t n) {
 return SVE_ACLE_FUNC(svdup_neonq, _f16, , )(n);
 }
-// CHECK-NEXT %0 = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> %n, i64 0)
+// CHECK-NEXT %0 = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> %n, i64 0)
 // CHECK-NEXT %1 = call @llvm.aarch64.sve.dupq.lane.nxv4f32( %0, i64 0)
 // CHECK-NEXT ret %1
 // CHECK-LABEL: @test_svdup_neonq_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f3213__Float32x4_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -179,13 +179,13 @@ svfloat32_t test_svdup_neonq_f32(float32x4_t n) {
 // CHECK-LABEL: @test_svdup_neonq_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f6413__Float64x2_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
@@ -195,13 +195,13 @@ svfloat64_t test_svdup_neonq_f64(float64x2_t n) {
 // CHECK-LABEL: @test_svdup_neonq_bf16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP0]], i64 0)
 // CHECK-NEXT: ret [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z21test_svdup_neonq_bf1614__Bfloat16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP0]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP1]]
 //
diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c
index ac33477fccc1ff1daa31f6d0371cd47333e8678c..2bc9255dc336061b4e5f296a9ad096634daff55c 100644
--- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c
+++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c
@@ -16,12 +16,12 @@
 // CHECK-LABEL: @test_svget_neonq_s8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z19test_svget_neonq_s8u10__SVInt8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_svget_neonq_s8(svint8_t n) {
@@ -31,12 +31,12 @@ int8x16_t test_svget_neonq_s8(svint8_t n) {
 //
 // CHECK-LABEL: @test_svget_neonq_s16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s16u11__SVInt16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_svget_neonq_s16(svint16_t n) {
@@ -45,12 +45,12 @@ int16x8_t test_svget_neonq_s16(svint16_t n) {
 // CHECK-LABEL: @test_svget_neonq_s32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s32u11__SVInt32_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_svget_neonq_s32(svint32_t n) {
@@ -59,12 +59,12 @@ int32x4_t test_svget_neonq_s32(svint32_t n) {
 // CHECK-LABEL: @test_svget_neonq_s64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s64u11__SVInt64_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]]
 //
 int64x2_t test_svget_neonq_s64(svint64_t n) {
@@ -73,12 +73,12 @@ int64x2_t test_svget_neonq_s64(svint64_t n) {
 // CHECK-LABEL: @test_svget_neonq_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z19test_svget_neonq_u8u11__SVUint8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_svget_neonq_u8(svuint8_t n) {
@@ -87,12 +87,12 @@ uint8x16_t test_svget_neonq_u8(svuint8_t n) {
 // CHECK-LABEL: @test_svget_neonq_u16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u16u12__SVUint16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]]
 //
 uint16x8_t test_svget_neonq_u16(svuint16_t n) {
@@ -101,12 +101,12 @@ uint16x8_t test_svget_neonq_u16(svuint16_t n) {
 // CHECK-LABEL: @test_svget_neonq_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u32u12__SVUint32_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]]
 //
 uint32x4_t test_svget_neonq_u32(svuint32_t n) {
@@ -115,12 +115,12 @@ uint32x4_t test_svget_neonq_u32(svuint32_t n) {
 // CHECK-LABEL: @test_svget_neonq_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u64u12__SVUint64_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]]
 //
 uint64x2_t test_svget_neonq_u64(svuint64_t n) {
@@ -129,12 +129,12 @@ uint64x2_t test_svget_neonq_u64(svuint64_t n) {
 // CHECK-LABEL: @test_svget_neonq_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <8 x half> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f16u13__SVFloat16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <8 x half> [[TMP0]]
 //
 float16x8_t test_svget_neonq_f16(svfloat16_t n) {
@@ -143,12 +143,12 @@ float16x8_t test_svget_neonq_f16(svfloat16_t n) {
 // CHECK-LABEL: @test_svget_neonq_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <4 x float> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f32u13__SVFloat32_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <4 x float> [[TMP0]]
 //
 float32x4_t test_svget_neonq_f32(svfloat32_t n) {
@@ -157,12 +157,12 @@ float32x4_t test_svget_neonq_f32(svfloat32_t n) {
 // CHECK-LABEL: @test_svget_neonq_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <2 x double> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f64u13__SVFloat64_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <2 x double> [[TMP0]]
 //
 float64x2_t test_svget_neonq_f64(svfloat64_t n) {
@@ -171,12 +171,12 @@ float64x2_t test_svget_neonq_f64(svfloat64_t n) {
 // CHECK-LABEL: @test_svget_neonq_bf16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0)
 // CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z21test_svget_neonq_bf16u14__SVBFloat16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
 //
 bfloat16x8_t test_svget_neonq_bf16(svbfloat16_t n) {
diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c
index d6ceb8218ea27fe390ef6b655fa7a87fb14bdda2..af9f4d1fccf77850e3dd9419db2c8a4d7a1b8e3c 100644
--- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c
+++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c
@@ -16,12 +16,12 @@
 // CHECK-LABEL: @test_svset_neonq_s8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z19test_svset_neonq_s8u10__SVInt8_t11__Int8x16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svint8_t test_svset_neonq_s8(svint8_t s, int8x16_t n) {
@@ -30,12 +30,12 @@ svint8_t test_svset_neonq_s8(svint8_t s, int8x16_t n) {
 // CHECK-LABEL: @test_svset_neonq_s16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s16u11__SVInt16_t11__Int16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svint16_t test_svset_neonq_s16(svint16_t s, int16x8_t n) {
@@ -44,12 +44,12 @@ svint16_t test_svset_neonq_s16(svint16_t s, int16x8_t n) {
 // CHECK-LABEL: @test_svset_neonq_s32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s32u11__SVInt32_t11__Int32x4_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svint32_t test_svset_neonq_s32(svint32_t s, int32x4_t n) {
@@ -58,12 +58,12 @@ svint32_t test_svset_neonq_s32(svint32_t s, int32x4_t n) {
 // CHECK-LABEL: @test_svset_neonq_s64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s64u11__SVInt64_t11__Int64x2_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svint64_t test_svset_neonq_s64(svint64_t s, int64x2_t n) {
@@ -72,12 +72,12 @@ svint64_t test_svset_neonq_s64(svint64_t s, int64x2_t n) {
 // CHECK-LABEL: @test_svset_neonq_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z19test_svset_neonq_u8u11__SVUint8_t12__Uint8x16_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svuint8_t test_svset_neonq_u8(svuint8_t s, uint8x16_t n) {
@@ -86,12 +86,12 @@ svuint8_t test_svset_neonq_u8(svuint8_t s, uint8x16_t n) {
 // CHECK-LABEL: @test_svset_neonq_u16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u16u12__SVUint16_t12__Uint16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svuint16_t test_svset_neonq_u16(svuint16_t s, uint16x8_t n) {
@@ -100,12 +100,12 @@ svuint16_t test_svset_neonq_u16(svuint16_t s, uint16x8_t n) {
 // CHECK-LABEL: @test_svset_neonq_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u32u12__SVUint32_t12__Uint32x4_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svuint32_t test_svset_neonq_u32(svuint32_t s, uint32x4_t n) {
@@ -114,12 +114,12 @@ svuint32_t test_svset_neonq_u32(svuint32_t s, uint32x4_t n) {
 // CHECK-LABEL: @test_svset_neonq_u64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u64u12__SVUint64_t12__Uint64x2_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svuint64_t test_svset_neonq_u64(svuint64_t s, uint64x2_t n) {
@@ -128,12 +128,12 @@ svuint64_t test_svset_neonq_u64(svuint64_t s, uint64x2_t n) {
 // CHECK-LABEL: @test_svset_neonq_f16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f16u13__SVFloat16_t13__Float16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svfloat16_t test_svset_neonq_f16(svfloat16_t s, float16x8_t n) {
@@ -142,12 +142,12 @@ svfloat16_t test_svset_neonq_f16(svfloat16_t s, float16x8_t n) {
 // CHECK-LABEL: @test_svset_neonq_f32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f32u13__SVFloat32_t13__Float32x4_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svfloat32_t test_svset_neonq_f32(svfloat32_t s, float32x4_t n) {
@@ -156,12 +156,12 @@ svfloat32_t test_svset_neonq_f32(svfloat32_t s, float32x4_t n) {
 // CHECK-LABEL: @test_svset_neonq_f64(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f64u13__SVFloat64_t13__Float64x2_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svfloat64_t test_svset_neonq_f64(svfloat64_t s, float64x2_t n) {
@@ -170,12 +170,12 @@ svfloat64_t test_svset_neonq_f64(svfloat64_t s, float64x2_t n) {
 // CHECK-LABEL: @test_svset_neonq_bf16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
 // CHECK-NEXT: ret [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z21test_svset_neonq_bf16u14__SVBFloat16_t14__Bfloat16x8_t(
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
 // CPP-CHECK-NEXT: ret [[TMP0]]
 //
 svbfloat16_t test_svset_neonq_bf16(svbfloat16_t s, bfloat16x8_t n) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
index 3f25eb0cba26c6a94f2309e17bb98b246230b679..fc2d09391ac9c273414afaa3e43795c783499f1e 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -32,21 +32,21 @@ DEFINE_STRUCT(bool)
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_int64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_int64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret [[CASTSCALABLESVE]]
 //
 svint64_t read_int64(struct struct_int64 *s) {
@@ -55,21 +55,21 @@ svint64_t read_int64(struct struct_int64 *s) {
 // CHECK-128-LABEL: @write_int64(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[X:%.*]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_int64(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[X:%.*]], i64 0)
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_int64(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[X:%.*]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
@@ -86,21 +86,21 @@ void write_int64(struct struct_int64 *s, svint64_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_float64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_float64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret [[CASTSCALABLESVE]]
 //
 svfloat64_t read_float64(struct struct_float64 *s) {
@@ -109,21 +109,21 @@ svfloat64_t read_float64(struct struct_float64 *s) {
 // CHECK-128-LABEL: @write_float64(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[X:%.*]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_float64(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[X:%.*]], i64 0)
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_float64(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[X:%.*]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
@@ -140,21 +140,21 @@ void write_float64(struct struct_float64 *s, svfloat64_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_bfloat16(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_bfloat16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret [[CASTSCALABLESVE]]
 //
 svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
@@ -163,21 +163,21 @@ svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
 // CHECK-128-LABEL: @write_bfloat16(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[X:%.*]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bfloat16(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[X:%.*]], i64 0)
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16( [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bfloat16(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[X:%.*]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16( [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
@@ -194,7 +194,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
 // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-128-NEXT: ret [[TMP1]]
 //
@@ -202,7 +202,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0)
 // CHECK-256-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-256-NEXT: ret [[TMP1]]
 //
@@ -210,7 +210,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
 // CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-512-NEXT: ret [[TMP1]]
 //
@@ -221,7 +221,7 @@ svbool_t read_bool(struct struct_bool *s) {
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast %x to
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
@@ -229,7 +229,7 @@ svbool_t read_bool(struct struct_bool *s) {
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast %x to
-// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( [[TMP0]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: store <4 x i8> [[CASTFIXEDSVE]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
@@ -237,7 +237,7 @@ svbool_t read_bool(struct struct_bool *s) {
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast %x to
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
index 20bf6ab1d64a494dd6e27670ff4d141c9476744b..fff73c019cd27601952454e95dcdb006c5a2b000 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -45,7 +45,7 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to *
 // CHECK-NEXT: store [[X:%.*]], * [[TMP0]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP1]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP1]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE2]]
 //
 svint32_t sizeless_caller(svint32_t x) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
index dd9e8495f22189a4a1148f74ec5fa81ede2b1122..beb6e722e1d8b9f6e314f24621a649a7203365fc 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -63,7 +63,7 @@ fixed_bool_t from_svbool_t(svbool_t type) {
 // CHECK-LABEL: @lax_cast(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = alloca <16 x i32>, align 64
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0)
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[TMP0:%.*]], align 64, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TMP0]] to *
 // CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]], align 64, !tbaa [[TBAA6]]
@@ -76,7 +76,7 @@ svint64_t lax_cast(fixed_int32_t type) {
 // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE]]
 //
 svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
@@ -85,7 +85,7 @@ svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
 // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE:%.*]], i64 0)
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT: ret void
 //
@@ -96,7 +96,7 @@ gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
 // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE]]
 //
 fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
@@ -105,7 +105,7 @@ fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
 // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0)
 // CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT: ret void
 //
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
index 7837b743e502d2ea63c3125cc2f324b7228433a8..784422c221f483ac121545d8f7d6c81a371eb632 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -24,23 +24,23 @@ fixed_int32_t global_vec;
 // CHECK-NEXT: store [[VEC:%.*]], * [[VEC_ADDR]], align 16
 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0)
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
-// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0)
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast [[CASTFIXEDSVE2]] to
 // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP4]])
 // CHECK-NEXT: store [[TMP5]], * [[PG]], align 2
 // CHECK-NEXT: [[TMP6:%.*]] = load , * [[PG]], align 2
 // CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0)
 // CHECK-NEXT: [[TMP8:%.*]] = load , * [[VEC_ADDR]], align 16
 // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP6]])
 // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP9]], [[CASTSCALABLESVE]], [[TMP8]])
-// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP10]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TMP10]], i64 0)
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
 // CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
-// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE4]]
 //
 fixed_int32_t foo(svbool_t pred, svint32_t vec) {
@@ -57,7 +57,7 @@ fixed_int32_t foo(svbool_t pred, svint32_t vec) {
 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16
 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE]]
 //
 fixed_int32_t test_ptr_to_global() {
@@ -78,7 +78,7 @@ fixed_int32_t test_ptr_to_global() {
 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[ARRAYIDX]], align 16
 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE]]
 //
 fixed_int32_t array_arg(fixed_int32_t arr[]) {
@@ -96,7 +96,7 @@ fixed_int32_t array_arg(fixed_int32_t arr[]) {
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2
 // CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
 // CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, <8 x i8>* [[RETVAL]], align 2
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0)
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-NEXT: ret [[TMP3]]
 //
@@ -121,25 +121,25 @@ fixed_bool_t address_of_array_idx() {
 // CHECK-NEXT: store <8 x i8> , <8 x i8>* [[YY]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0)
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8
 // CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8
 // CHECK-NEXT: [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]]
-// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0)
 // CHECK-NEXT: [[TMP5:%.*]] = bitcast [[CASTFIXEDSVE2]] to
 // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP5]])
 // CHECK-NEXT: store [[TMP6]], * [[PG]], align 2
 // CHECK-NEXT: [[TMP7:%.*]] = load , * [[PG]], align 2
 // CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0)
 // CHECK-NEXT: [[TMP9:%.*]] = load , * [[VEC_ADDR]], align 16
 // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP7]])
 // CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP10]], [[CASTSCALABLESVE]], [[TMP9]])
-// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP11]], i64 0)
+// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TMP11]], i64 0)
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
 // CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
-// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0)
 // CHECK-NEXT: ret [[CASTSCALABLESVE4]]
 //
 fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
index 1bf5d2d211d0dec95556f05bd1040d37377a5b1e..741422ab404743194398909e8e0a5d172db61aaf 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -22,13 +22,13 @@ fixed_bool_t global_bool;
 // CHECK-128-LABEL: @write_global_i64(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[V:%.*]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[V:%.*]], i64 0)
 // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_i64(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[V:%.*]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[V:%.*]], i64 0)
 // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-512-NEXT: ret void
 //
@@ -36,13 +36,13 @@ void write_global_i64(svint64_t v) { global_i64 = v; }
 // CHECK-128-LABEL: @write_global_bf16(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[V:%.*]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[V:%.*]], i64 0)
 // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bf16(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[V:%.*]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16( [[V:%.*]], i64 0)
 // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
@@ -51,14 +51,14 @@ void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0)
 // CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0)
 // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
@@ -71,13 +71,13 @@ void write_global_bool(svbool_t v) { global_bool = v; }
 // CHECK-128-LABEL: @read_global_i64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_global_i64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret [[CASTSCALABLESVE]]
 //
 svint64_t read_global_i64() { return global_i64; }
@@ -85,13 +85,13 @@ svint64_t read_global_i64() { return global_i64; }
 // CHECK-128-LABEL: @read_global_bf16(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_global_bf16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret [[CASTSCALABLESVE]]
 //
 svbfloat16_t read_global_bf16() { return global_bf16; }
@@ -99,14 +99,14 @@ svbfloat16_t read_global_bf16() { return global_bf16; }
 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
 // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-128-NEXT: ret [[TMP1]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-512-NEXT:
[[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
 // CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
 // CHECK-512-NEXT: ret [[TMP1]]
 //
diff --git a/clang/test/CodeGen/bounds-checking-fam.c b/clang/test/CodeGen/bounds-checking-fam.c
new file mode 100644
index 0000000000000000000000000000000000000000..fbc51dc7a5e193d3a3d31d8dc95b0df92f151156
--- /dev/null
+++ b/clang/test/CodeGen/bounds-checking-fam.c
@@ -0,0 +1,42 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -emit-llvm -triple x86_64 -fsanitize=array-bounds %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-STRICT-0
+// RUN: %clang_cc1 -emit-llvm -triple x86_64 -fsanitize=array-bounds -fstrict-flex-arrays=1 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-STRICT-1
+// RUN: %clang_cc1 -emit-llvm -triple x86_64 -fsanitize=array-bounds -fstrict-flex-arrays=2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-STRICT-2
+
+// Before the flexible array member was added in C99, many projects used a
+// one-element array as the last member of a structure as an alternative.
+// E.g. https://github.com/python/cpython/issues/94250
+// Suppress such errors with -fstrict-flex-arrays=0.
+struct One {
+  int a[1];
+};
+struct Two {
+  int a[2];
+};
+struct Three {
+  int a[3];
+};
+
+// CHECK-LABEL: define {{.*}} @test_one(
+int test_one(struct One *p, int i) {
+  // CHECK-STRICT-0-NOT: @__ubsan
+  // CHECK-STRICT-1-NOT: @__ubsan
+  // CHECK-STRICT-2: call void @__ubsan_handle_out_of_bounds_abort(
+  return p->a[i] + (p->a)[i];
+}
+
+// CHECK-LABEL: define {{.*}} @test_two(
+int test_two(struct Two *p, int i) {
+  // CHECK-STRICT-0: call void @__ubsan_handle_out_of_bounds_abort(
+  // CHECK-STRICT-1: call void @__ubsan_handle_out_of_bounds_abort(
+  // CHECK-STRICT-2: call void @__ubsan_handle_out_of_bounds_abort(
+  return p->a[i] + (p->a)[i];
+}
+
+// CHECK-LABEL: define {{.*}} @test_three(
+int test_three(struct Three *p, int i) {
+  // CHECK-STRICT-0: call void @__ubsan_handle_out_of_bounds_abort(
+  // CHECK-STRICT-1: call void @__ubsan_handle_out_of_bounds_abort(
+  // CHECK-STRICT-2: call void @__ubsan_handle_out_of_bounds_abort(
+  return p->a[i] + (p->a)[i];
+}
diff --git a/clang/test/CodeGen/bounds-checking.c b/clang/test/CodeGen/bounds-checking.c
index ca44adf5a1f273563252cb2bb2c68d59bf4c3ae4..62a49fd40d943376bcc67c80602273bcf0200795 100644
--- a/clang/test/CodeGen/bounds-checking.c
+++ b/clang/test/CodeGen/bounds-checking.c
@@ -35,8 +35,12 @@
 union U { int a[0]; int b[1]; int c[2]; };
 // CHECK-LABEL: define {{.*}} @f4
 int f4(union U *u, int i) {
-  // a and b are treated as flexible array members.
-  // CHECK-NOT: @llvm.ubsantrap
+  // a and b would be treated as flexible array members, but they are inside a
+  // union, and that prevents them from being considered flexible array members.
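+  // (Illustrative note: C allows a flexible array member only as the last
+  //  member of a struct, e.g. "struct S { int n; int tail[]; };"; union
+  //  members such as a and b can never qualify.)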
+ // NONLOCAL: @llvm.ubsantrap return u->a[i] + u->b[i]; // CHECK: } } diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c index d9ea753ee86a219a194c6810205ea37f533e60e1..ea591a195cadfcaf56eb5e0c764ceca66145a538 100644 --- a/clang/test/CodeGen/builtins-wasm.c +++ b/clang/test/CodeGen/builtins-wasm.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple wasm32-unknown-unknown -target-feature +simd128 -target-feature +relaxed-simd -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY32 -// RUN: %clang_cc1 -no-opaque-pointers -triple wasm64-unknown-unknown -target-feature +simd128 -target-feature +relaxed-simd -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY64 -// RUN: not %clang_cc1 -no-opaque-pointers -triple wasm64-unknown-unknown -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes MISSING-SIMD +// RUN: %clang_cc1 -triple wasm32-unknown-unknown -target-feature +simd128 -target-feature +relaxed-simd -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY32 +// RUN: %clang_cc1 -triple wasm64-unknown-unknown -target-feature +simd128 -target-feature +relaxed-simd -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY64 +// RUN: not %clang_cc1 -triple wasm64-unknown-unknown -target-feature +nontrapping-fptoint -target-feature +exception-handling -target-feature +bulk-memory -target-feature +atomics -flax-vector-conversions=none -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes MISSING-SIMD // SIMD convenience types typedef signed char i8x16 __attribute((vector_size(16))); @@ -40,13 +40,12 @@ __SIZE_TYPE__ tls_align(void) { void *tls_base(void) { return __builtin_wasm_tls_base(); - // WEBASSEMBLY: call i8* @llvm.wasm.tls.base() + // WEBASSEMBLY: call ptr @llvm.wasm.tls.base() } void throw(void *obj) { return __builtin_wasm_throw(0, obj); - // WEBASSEMBLY32: call void @llvm.wasm.throw(i32 0, i8* %{{.*}}) - // WEBASSEMBLY64: call void @llvm.wasm.throw(i32 0, i8* %{{.*}}) + // WEBASSEMBLY: call void @llvm.wasm.throw(i32 0, ptr %{{.*}}) } void rethrow(void) { @@ -57,20 +56,17 @@ void rethrow(void) { int memory_atomic_wait32(int *addr, int expected, long long timeout) { return __builtin_wasm_memory_atomic_wait32(addr, expected, timeout); - // WEBASSEMBLY32: call i32 @llvm.wasm.memory.atomic.wait32(i32* %{{.*}}, i32 %{{.*}}, i64 %{{.*}}) - // WEBASSEMBLY64: call i32 @llvm.wasm.memory.atomic.wait32(i32* %{{.*}}, i32 %{{.*}}, i64 %{{.*}}) + // WEBASSEMBLY: call i32 @llvm.wasm.memory.atomic.wait32(ptr %{{.*}}, i32 %{{.*}}, i64 %{{.*}}) } int memory_atomic_wait64(long long *addr, long long expected, long long timeout) { return __builtin_wasm_memory_atomic_wait64(addr, expected, 
timeout); - // WEBASSEMBLY32: call i32 @llvm.wasm.memory.atomic.wait64(i64* %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) - // WEBASSEMBLY64: call i32 @llvm.wasm.memory.atomic.wait64(i64* %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) + // WEBASSEMBLY: call i32 @llvm.wasm.memory.atomic.wait64(ptr %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) } unsigned int memory_atomic_notify(int *addr, unsigned int count) { return __builtin_wasm_memory_atomic_notify(addr, count); - // WEBASSEMBLY32: call i32 @llvm.wasm.memory.atomic.notify(i32* %{{.*}}, i32 %{{.*}}) - // WEBASSEMBLY64: call i32 @llvm.wasm.memory.atomic.notify(i32* %{{.*}}, i32 %{{.*}}) + // WEBASSEMBLY: call i32 @llvm.wasm.memory.atomic.notify(ptr %{{.*}}, i32 %{{.*}}) } int trunc_s_i32_f32(float f) { diff --git a/clang/test/CodeGen/epi_rvv-header-generated.c b/clang/test/CodeGen/epi_rvv-header-generated.c index d1e22b014de72d5ad3002b368401684a3d82ff17..cf49a46209b6360dee6e33c62d5df00dfeaa4af0 100644 --- a/clang/test/CodeGen/epi_rvv-header-generated.c +++ b/clang/test/CodeGen/epi_rvv-header-generated.c @@ -4126,9 +4126,9 @@ __epi_1xf64 test_vfredmax_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmax_4xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf32 test_vfredmax_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned long int arg_2) @@ -4138,10 +4138,10 @@ __epi_4xf32 test_vfredmax_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmax_4xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf32 test_vfredmax_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi_4xf32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4151,9 +4151,9 @@ __epi_4xf32 test_vfredmax_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmax_2xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call 
@llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xf64 test_vfredmax_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2) @@ -4163,10 +4163,10 @@ __epi_2xf64 test_vfredmax_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmax_2xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xf64 test_vfredmax_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -4176,9 +4176,9 @@ __epi_2xf64 test_vfredmax_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmax_8xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xf32 test_vfredmax_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned long int arg_2) @@ -4188,10 +4188,10 @@ __epi_8xf32 test_vfredmax_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmax_8xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xf32 
test_vfredmax_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi_8xf32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -4201,9 +4201,9 @@ __epi_8xf32 test_vfredmax_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmax_4xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf64 test_vfredmax_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2) @@ -4213,10 +4213,10 @@ __epi_4xf64 test_vfredmax_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmax_4xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf64 test_vfredmax_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4266,9 +4266,9 @@ __epi_1xf64 test_vfredmin_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmin_4xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf32 test_vfredmin_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned long int arg_2) @@ -4278,10 +4278,10 @@ __epi_4xf32 test_vfredmin_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmin_4xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( 
[[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf32 test_vfredmin_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi_4xf32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4291,9 +4291,9 @@ __epi_4xf32 test_vfredmin_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmin_2xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xf64 test_vfredmin_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2) @@ -4303,10 +4303,10 @@ __epi_2xf64 test_vfredmin_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmin_2xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xf64 test_vfredmin_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -4316,9 +4316,9 @@ __epi_2xf64 test_vfredmin_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmin_8xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xf32 test_vfredmin_8xf32(__epi_8xf32 
arg_0, __epi_8xf32 arg_1, unsigned long int arg_2) @@ -4328,10 +4328,10 @@ __epi_8xf32 test_vfredmin_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmin_8xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xf32 test_vfredmin_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi_8xf32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -4341,9 +4341,9 @@ __epi_8xf32 test_vfredmin_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfredmin_4xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf64 test_vfredmin_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2) @@ -4353,10 +4353,10 @@ __epi_4xf64 test_vfredmin_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredmin_4xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf64 test_vfredmin_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4406,9 +4406,9 @@ __epi_1xf64 test_vfredosum_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, __ep // CHECK-O2-LABEL: @test_vfredosum_4xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 
0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf32 test_vfredosum_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned long int arg_2) @@ -4418,10 +4418,10 @@ __epi_4xf32 test_vfredosum_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned // CHECK-O2-LABEL: @test_vfredosum_4xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf32 test_vfredosum_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi_4xf32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4431,9 +4431,9 @@ __epi_4xf32 test_vfredosum_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __ep // CHECK-O2-LABEL: @test_vfredosum_2xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xf64 test_vfredosum_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2) @@ -4443,10 +4443,10 @@ __epi_2xf64 test_vfredosum_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned // CHECK-O2-LABEL: @test_vfredosum_2xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) +// 
CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xf64 test_vfredosum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -4456,9 +4456,9 @@ __epi_2xf64 test_vfredosum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __ep // CHECK-O2-LABEL: @test_vfredosum_8xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xf32 test_vfredosum_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned long int arg_2) @@ -4468,10 +4468,10 @@ __epi_8xf32 test_vfredosum_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned // CHECK-O2-LABEL: @test_vfredosum_8xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xf32 test_vfredosum_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi_8xf32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -4481,9 +4481,9 @@ __epi_8xf32 test_vfredosum_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __ep // CHECK-O2-LABEL: @test_vfredosum_4xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf64 test_vfredosum_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2) @@ -4493,10 +4493,10 @@ __epi_4xf64 test_vfredosum_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned // CHECK-O2-LABEL: @test_vfredosum_4xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call 
@llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf64 test_vfredosum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4546,9 +4546,9 @@ __epi_1xf64 test_vfredsum_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredsum_4xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf32 test_vfredsum_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned long int arg_2) @@ -4558,10 +4558,10 @@ __epi_4xf32 test_vfredsum_4xf32(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredsum_4xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv4f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf32 test_vfredsum_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi_4xf32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -4571,9 +4571,9 @@ __epi_4xf32 test_vfredsum_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfredsum_2xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: 
[[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xf64 test_vfredsum_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2) @@ -4583,10 +4583,10 @@ __epi_2xf64 test_vfredsum_2xf64(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredsum_2xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xf64 test_vfredsum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -4596,9 +4596,9 @@ __epi_2xf64 test_vfredsum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, __epi // CHECK-O2-LABEL: @test_vfredsum_8xf32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xf32 test_vfredsum_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned long int arg_2) @@ -4608,10 +4608,10 @@ __epi_8xf32 test_vfredsum_8xf32(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredsum_8xf32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2f32.nxv8f32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xf32 test_vfredsum_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi_8xf32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -4621,9 +4621,9 @@ __epi_8xf32 test_vfredsum_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi // 
CHECK-O2-LABEL: @test_vfredsum_4xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf64 test_vfredsum_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2) @@ -4633,10 +4633,10 @@ __epi_4xf64 test_vfredsum_4xf64(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vfredsum_4xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf64 test_vfredsum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -6086,9 +6086,9 @@ __epi_8xf64 test_vfwnmsac_8xf64_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, __epi // CHECK-O2-LABEL: @test_vfwredosum_2xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xf64 test_vfwredosum_2xf64(__epi_2xf32 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2) @@ -6098,10 +6098,10 @@ __epi_2xf64 test_vfwredosum_2xf64(__epi_2xf32 arg_0, __epi_2xf64 arg_1, unsigned // CHECK-O2-LABEL: @test_vfwredosum_2xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[TMP0]], 
[[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xf64 test_vfwredosum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf32 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -6111,9 +6111,9 @@ __epi_2xf64 test_vfwredosum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf32 arg_1, __e // CHECK-O2-LABEL: @test_vfwredosum_4xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xf64 test_vfwredosum_4xf64(__epi_4xf32 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2) @@ -6123,10 +6123,10 @@ __epi_4xf64 test_vfwredosum_4xf64(__epi_4xf32 arg_0, __epi_4xf64 arg_1, unsigned // CHECK-O2-LABEL: @test_vfwredosum_4xf64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xf64 test_vfwredosum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf32 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -6136,9 +6136,9 @@ __epi_4xf64 test_vfwredosum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf32 arg_1, __e // CHECK-O2-LABEL: @test_vfwredosum_8xf64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xf64 test_vfwredosum_8xf64(__epi_8xf32 arg_0, __epi_8xf64 arg_1, unsigned long int arg_2) @@ -6148,10 +6148,10 @@ __epi_8xf64 test_vfwredosum_8xf64(__epi_8xf32 arg_0, __epi_8xf64 arg_1, unsigned // CHECK-O2-LABEL: @test_vfwredosum_8xf64_mask( 
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xf64 test_vfwredosum_8xf64_mask(__epi_8xf64 arg_0, __epi_8xf32 arg_1, __epi_8xf64 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -6161,9 +6161,9 @@ __epi_8xf64 test_vfwredosum_8xf64_mask(__epi_8xf64 arg_0, __epi_8xf32 arg_1, __e
// CHECK-O2-LABEL: @test_vfwredsum_2xf64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_2xf64 test_vfwredsum_2xf64(__epi_2xf32 arg_0, __epi_2xf64 arg_1, unsigned long int arg_2)
@@ -6173,10 +6173,10 @@ __epi_2xf64 test_vfwredsum_2xf64(__epi_2xf32 arg_0, __epi_2xf64 arg_1, unsigned
// CHECK-O2-LABEL: @test_vfwredsum_2xf64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv2f64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_2xf64 test_vfwredsum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf32 arg_1, __epi_2xf64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -6186,9 +6186,9 @@ __epi_2xf64 test_vfwredsum_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf32 arg_1, __ep
// CHECK-O2-LABEL: @test_vfwredsum_4xf64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xf64 test_vfwredsum_4xf64(__epi_4xf32 arg_0, __epi_4xf64 arg_1, unsigned long int arg_2)
@@ -6198,10 +6198,10 @@ __epi_4xf64 test_vfwredsum_4xf64(__epi_4xf32 arg_0, __epi_4xf64 arg_1, unsigned
// CHECK-O2-LABEL: @test_vfwredsum_4xf64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv4f64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xf64 test_vfwredsum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf32 arg_1, __epi_4xf64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -6211,9 +6211,9 @@ __epi_4xf64 test_vfwredsum_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf32 arg_1, __ep
// CHECK-O2-LABEL: @test_vfwredsum_8xf64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xf64 test_vfwredsum_8xf64(__epi_8xf32 arg_0, __epi_8xf64 arg_1, unsigned long int arg_2)
@@ -6223,10 +6223,10 @@ __epi_8xf64 test_vfwredsum_8xf64(__epi_8xf32 arg_0, __epi_8xf64 arg_1, unsigned
// CHECK-O2-LABEL: @test_vfwredsum_8xf64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1f64.nxv8f64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xf64 test_vfwredsum_8xf64_mask(__epi_8xf64 arg_0, __epi_8xf32 arg_1, __epi_8xf64 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -15994,9 +15994,9 @@ __epi_1xi64 test_vredand_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_16xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi8 test_vredand_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -16006,10 +16006,10 @@ __epi_16xi8 test_vredand_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_16xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi8 test_vredand_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16019,9 +16019,9 @@ __epi_16xi8 test_vredand_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_8xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi16 test_vredand_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -16031,10 +16031,10 @@ __epi_8xi16 test_vredand_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_8xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi16 test_vredand_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16044,9 +16044,9 @@ __epi_8xi16 test_vredand_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_4xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi32 test_vredand_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -16056,10 +16056,10 @@ __epi_4xi32 test_vredand_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_4xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi32 test_vredand_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16069,9 +16069,9 @@ __epi_4xi32 test_vredand_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_2xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_2xi64 test_vredand_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -16081,10 +16081,10 @@ __epi_2xi64 test_vredand_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_2xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_2xi64 test_vredand_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -16094,9 +16094,9 @@ __epi_2xi64 test_vredand_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_32xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_32xi8 test_vredand_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2)
@@ -16106,10 +16106,10 @@ __epi_32xi8 test_vredand_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_32xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_32xi8 test_vredand_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -16119,9 +16119,9 @@ __epi_32xi8 test_vredand_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_16xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi16 test_vredand_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -16131,10 +16131,10 @@ __epi_16xi16 test_vredand_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigne
// CHECK-O2-LABEL: @test_vredand_16xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi16 test_vredand_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16144,9 +16144,9 @@ __epi_16xi16 test_vredand_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __
// CHECK-O2-LABEL: @test_vredand_8xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi32 test_vredand_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -16156,10 +16156,10 @@ __epi_8xi32 test_vredand_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_8xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi32 test_vredand_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16169,9 +16169,9 @@ __epi_8xi32 test_vredand_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredand_4xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi64 test_vredand_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -16181,10 +16181,10 @@ __epi_4xi64 test_vredand_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredand_4xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi64 test_vredand_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16274,9 +16274,9 @@ __epi_1xi64 test_vredmax_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_16xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi8 test_vredmax_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -16286,10 +16286,10 @@ __epi_16xi8 test_vredmax_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_16xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi8 test_vredmax_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16299,9 +16299,9 @@ __epi_16xi8 test_vredmax_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_8xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi16 test_vredmax_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -16311,10 +16311,10 @@ __epi_8xi16 test_vredmax_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_8xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi16 test_vredmax_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16324,9 +16324,9 @@ __epi_8xi16 test_vredmax_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_4xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi32 test_vredmax_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -16336,10 +16336,10 @@ __epi_4xi32 test_vredmax_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_4xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi32 test_vredmax_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16349,9 +16349,9 @@ __epi_4xi32 test_vredmax_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_2xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_2xi64 test_vredmax_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -16361,10 +16361,10 @@ __epi_2xi64 test_vredmax_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_2xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_2xi64 test_vredmax_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -16374,9 +16374,9 @@ __epi_2xi64 test_vredmax_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_32xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_32xi8 test_vredmax_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2)
@@ -16386,10 +16386,10 @@ __epi_32xi8 test_vredmax_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_32xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_32xi8 test_vredmax_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -16399,9 +16399,9 @@ __epi_32xi8 test_vredmax_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_16xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi16 test_vredmax_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -16411,10 +16411,10 @@ __epi_16xi16 test_vredmax_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigne
// CHECK-O2-LABEL: @test_vredmax_16xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi16 test_vredmax_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16424,9 +16424,9 @@ __epi_16xi16 test_vredmax_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __
// CHECK-O2-LABEL: @test_vredmax_8xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi32 test_vredmax_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -16436,10 +16436,10 @@ __epi_8xi32 test_vredmax_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_8xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi32 test_vredmax_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16449,9 +16449,9 @@ __epi_8xi32 test_vredmax_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmax_4xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi64 test_vredmax_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -16461,10 +16461,10 @@ __epi_4xi64 test_vredmax_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmax_4xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi64 test_vredmax_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16554,9 +16554,9 @@ __epi_1xi64 test_vredmaxu_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_16xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi8 test_vredmaxu_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -16566,10 +16566,10 @@ __epi_16xi8 test_vredmaxu_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_16xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi8 test_vredmaxu_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16579,9 +16579,9 @@ __epi_16xi8 test_vredmaxu_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_8xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi16 test_vredmaxu_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -16591,10 +16591,10 @@ __epi_8xi16 test_vredmaxu_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_8xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi16 test_vredmaxu_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16604,9 +16604,9 @@ __epi_8xi16 test_vredmaxu_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_4xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi32 test_vredmaxu_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -16616,10 +16616,10 @@ __epi_4xi32 test_vredmaxu_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_4xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi32 test_vredmaxu_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16629,9 +16629,9 @@ __epi_4xi32 test_vredmaxu_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_2xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_2xi64 test_vredmaxu_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -16641,10 +16641,10 @@ __epi_2xi64 test_vredmaxu_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_2xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_2xi64 test_vredmaxu_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -16654,9 +16654,9 @@ __epi_2xi64 test_vredmaxu_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_32xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_32xi8 test_vredmaxu_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2)
@@ -16666,10 +16666,10 @@ __epi_32xi8 test_vredmaxu_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_32xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_32xi8 test_vredmaxu_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -16679,9 +16679,9 @@ __epi_32xi8 test_vredmaxu_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_16xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi16 test_vredmaxu_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -16691,10 +16691,10 @@ __epi_16xi16 test_vredmaxu_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsign
// CHECK-O2-LABEL: @test_vredmaxu_16xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi16 test_vredmaxu_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16704,9 +16704,9 @@ __epi_16xi16 test_vredmaxu_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, _
// CHECK-O2-LABEL: @test_vredmaxu_8xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi32 test_vredmaxu_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -16716,10 +16716,10 @@ __epi_8xi32 test_vredmaxu_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_8xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi32 test_vredmaxu_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16729,9 +16729,9 @@ __epi_8xi32 test_vredmaxu_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi
// CHECK-O2-LABEL: @test_vredmaxu_4xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi64 test_vredmaxu_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -16741,10 +16741,10 @@ __epi_4xi64 test_vredmaxu_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredmaxu_4xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi64 test_vredmaxu_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16834,9 +16834,9 @@ __epi_1xi64 test_vredmin_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_16xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi8 test_vredmin_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -16846,10 +16846,10 @@ __epi_16xi8 test_vredmin_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_16xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi8 test_vredmin_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16859,9 +16859,9 @@ __epi_16xi8 test_vredmin_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_8xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi16 test_vredmin_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -16871,10 +16871,10 @@ __epi_8xi16 test_vredmin_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_8xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi16 test_vredmin_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -16884,9 +16884,9 @@ __epi_8xi16 test_vredmin_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_4xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi32 test_vredmin_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -16896,10 +16896,10 @@ __epi_4xi32 test_vredmin_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_4xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi32 test_vredmin_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -16909,9 +16909,9 @@ __epi_4xi32 test_vredmin_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_2xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_2xi64 test_vredmin_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -16921,10 +16921,10 @@ __epi_2xi64 test_vredmin_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_2xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_2xi64 test_vredmin_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -16934,9 +16934,9 @@ __epi_2xi64 test_vredmin_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_32xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_32xi8 test_vredmin_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2)
@@ -16946,10 +16946,10 @@ __epi_32xi8 test_vredmin_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_32xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_32xi8 test_vredmin_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -16959,9 +16959,9 @@ __epi_32xi8 test_vredmin_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_16xi16(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi16 test_vredmin_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -16971,10 +16971,10 @@ __epi_16xi16 test_vredmin_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigne
// CHECK-O2-LABEL: @test_vredmin_16xi16_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_16xi16 test_vredmin_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -16984,9 +16984,9 @@ __epi_16xi16 test_vredmin_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __
// CHECK-O2-LABEL: @test_vredmin_8xi32(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_8xi32 test_vredmin_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -16996,10 +16996,10 @@ __epi_8xi32 test_vredmin_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_8xi32_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_8xi32 test_vredmin_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -17009,9 +17009,9 @@ __epi_8xi32 test_vredmin_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_
// CHECK-O2-LABEL: @test_vredmin_4xi64(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_4xi64 test_vredmin_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -17021,10 +17021,10 @@ __epi_4xi64 test_vredmin_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lo
// CHECK-O2-LABEL: @test_vredmin_4xi64_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
// CHECK-O2-NEXT: ret [[TMP3]]
//
__epi_4xi64 test_vredmin_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -17114,9 +17114,9 @@ __epi_1xi64 test_vredminu_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi
// CHECK-O2-LABEL: @test_vredminu_16xi8(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
// CHECK-O2-NEXT: ret [[TMP2]]
//
__epi_16xi8 test_vredminu_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -17126,10 +17126,10 @@ __epi_16xi8 test_vredminu_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned l
// CHECK-O2-LABEL: @test_vredminu_16xi8_mask(
// CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call
@llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi8 test_vredminu_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ -17139,9 +17139,9 @@ __epi_16xi8 test_vredminu_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_8xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi16 test_vredminu_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2) @@ -17151,10 +17151,10 @@ __epi_8xi16 test_vredminu_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_8xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi16 test_vredminu_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17164,9 +17164,9 @@ __epi_8xi16 test_vredminu_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_4xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xi32 test_vredminu_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2) @@ -17176,10 +17176,10 @@ __epi_4xi32 test_vredminu_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_4xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( 
[[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xi32 test_vredminu_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -17189,9 +17189,9 @@ __epi_4xi32 test_vredminu_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_2xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xi64 test_vredminu_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2) @@ -17201,10 +17201,10 @@ __epi_2xi64 test_vredminu_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_2xi64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xi64 test_vredminu_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -17214,9 +17214,9 @@ __epi_2xi64 test_vredminu_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_32xi8( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call 
@llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_32xi8 test_vredminu_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2) @@ -17226,10 +17226,10 @@ __epi_32xi8 test_vredminu_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_32xi8_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_32xi8 test_vredminu_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4) @@ -17239,9 +17239,9 @@ __epi_32xi8 test_vredminu_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_16xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_16xi16 test_vredminu_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2) @@ -17251,10 +17251,10 @@ __epi_16xi16 test_vredminu_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsign // CHECK-O2-LABEL: @test_vredminu_16xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi16 test_vredminu_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ 
-17264,9 +17264,9 @@ __epi_16xi16 test_vredminu_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, _ // CHECK-O2-LABEL: @test_vredminu_8xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi32 test_vredminu_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2) @@ -17276,10 +17276,10 @@ __epi_8xi32 test_vredminu_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_8xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi32 test_vredminu_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17289,9 +17289,9 @@ __epi_8xi32 test_vredminu_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi // CHECK-O2-LABEL: @test_vredminu_4xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xi64 test_vredminu_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2) @@ -17301,10 +17301,10 @@ __epi_4xi64 test_vredminu_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned l // CHECK-O2-LABEL: @test_vredminu_4xi64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0) // 
CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xi64 test_vredminu_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -17394,9 +17394,9 @@ __epi_1xi64 test_vredor_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_1 // CHECK-O2-LABEL: @test_vredor_16xi8( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_16xi8 test_vredor_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2) @@ -17406,10 +17406,10 @@ __epi_16xi8 test_vredor_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_16xi8_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi8 test_vredor_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ -17419,9 +17419,9 @@ __epi_16xi8 test_vredor_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_1 // CHECK-O2-LABEL: @test_vredor_8xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi16 test_vredor_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2) @@ -17431,10 +17431,10 @@ __epi_8xi16 test_vredor_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, 
unsigned lon // CHECK-O2-LABEL: @test_vredor_8xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi16 test_vredor_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17444,9 +17444,9 @@ __epi_8xi16 test_vredor_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8 // CHECK-O2-LABEL: @test_vredor_4xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xi32 test_vredor_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2) @@ -17456,10 +17456,10 @@ __epi_4xi32 test_vredor_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_4xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xi32 test_vredor_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -17469,9 +17469,9 @@ __epi_4xi32 test_vredor_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4 // CHECK-O2-LABEL: @test_vredor_2xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call 
@llvm.riscv.vredor.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xi64 test_vredor_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2) @@ -17481,10 +17481,10 @@ __epi_2xi64 test_vredor_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_2xi64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xi64 test_vredor_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -17494,9 +17494,9 @@ __epi_2xi64 test_vredor_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2 // CHECK-O2-LABEL: @test_vredor_32xi8( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_32xi8 test_vredor_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2) @@ -17506,10 +17506,10 @@ __epi_32xi8 test_vredor_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_32xi8_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_32xi8 test_vredor_32xi8_mask(__epi_32xi8 
arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4) @@ -17519,9 +17519,9 @@ __epi_32xi8 test_vredor_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_3 // CHECK-O2-LABEL: @test_vredor_16xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_16xi16 test_vredor_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2) @@ -17531,10 +17531,10 @@ __epi_16xi16 test_vredor_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned // CHECK-O2-LABEL: @test_vredor_16xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi16 test_vredor_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ -17544,9 +17544,9 @@ __epi_16xi16 test_vredor_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __e // CHECK-O2-LABEL: @test_vredor_8xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi32 test_vredor_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2) @@ -17556,10 +17556,10 @@ __epi_8xi32 test_vredor_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_8xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) +// 
CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi32 test_vredor_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17569,9 +17569,9 @@ __epi_8xi32 test_vredor_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8 // CHECK-O2-LABEL: @test_vredor_4xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xi64 test_vredor_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2) @@ -17581,10 +17581,10 @@ __epi_4xi64 test_vredor_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lon // CHECK-O2-LABEL: @test_vredor_4xi64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xi64 test_vredor_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -17674,9 +17674,9 @@ __epi_1xi64 test_vredsum_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_16xi8( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_16xi8 test_vredsum_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int 
arg_2) @@ -17686,10 +17686,10 @@ __epi_16xi8 test_vredsum_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_16xi8_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi8 test_vredsum_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ -17699,9 +17699,9 @@ __epi_16xi8 test_vredsum_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_8xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi16 test_vredsum_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2) @@ -17711,10 +17711,10 @@ __epi_8xi16 test_vredsum_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_8xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi16 test_vredsum_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17724,9 +17724,9 @@ __epi_8xi16 test_vredsum_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_4xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call 
@llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_4xi32 test_vredsum_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2) @@ -17736,10 +17736,10 @@ __epi_4xi32 test_vredsum_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_4xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_4xi32 test_vredsum_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4) @@ -17749,9 +17749,9 @@ __epi_4xi32 test_vredsum_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_2xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_2xi64 test_vredsum_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2) @@ -17761,10 +17761,10 @@ __epi_2xi64 test_vredsum_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_2xi64_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call 
@llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_2xi64 test_vredsum_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4) @@ -17774,9 +17774,9 @@ __epi_2xi64 test_vredsum_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_32xi8( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_32xi8 test_vredsum_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2) @@ -17786,10 +17786,10 @@ __epi_32xi8 test_vredsum_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_32xi8_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_32xi8 test_vredsum_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4) @@ -17799,9 +17799,9 @@ __epi_32xi8 test_vredsum_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_16xi16( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_16xi16 test_vredsum_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2) @@ -17811,10 +17811,10 @@ __epi_16xi16 test_vredsum_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigne // CHECK-O2-LABEL: @test_vredsum_16xi16_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( 
[[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_16xi16 test_vredsum_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4) @@ -17824,9 +17824,9 @@ __epi_16xi16 test_vredsum_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __ // CHECK-O2-LABEL: @test_vredsum_8xi32( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0) // CHECK-O2-NEXT: ret [[TMP2]] // __epi_8xi32 test_vredsum_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2) @@ -17836,10 +17836,10 @@ __epi_8xi32 test_vredsum_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lo // CHECK-O2-LABEL: @test_vredsum_8xi32_mask( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) -// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]]) -// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) +// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0) // CHECK-O2-NEXT: ret [[TMP3]] // __epi_8xi32 test_vredsum_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4) @@ -17849,9 +17849,9 @@ __epi_8xi32 test_vredsum_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_ // CHECK-O2-LABEL: @test_vredsum_4xi64( // CHECK-O2-NEXT: entry: -// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) +// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0) // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]]) -// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0) +// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call 
@llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_4xi64 test_vredsum_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -17861,10 +17861,10 @@ __epi_4xi64 test_vredsum_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredsum_4xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_4xi64 test_vredsum_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -17954,9 +17954,9 @@ __epi_1xi64 test_vredxor_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_16xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_16xi8 test_vredxor_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned long int arg_2)
@@ -17966,10 +17966,10 @@ __epi_16xi8 test_vredxor_16xi8(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_16xi8_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv16i8( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_16xi8 test_vredxor_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_16xi8 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -17979,9 +17979,9 @@ __epi_16xi8 test_vredxor_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_8xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_8xi16 test_vredxor_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -17991,10 +17991,10 @@ __epi_8xi16 test_vredxor_8xi16(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_8xi16_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_8xi16 test_vredxor_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -18004,9 +18004,9 @@ __epi_8xi16 test_vredxor_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_4xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_4xi32 test_vredxor_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -18016,10 +18016,10 @@ __epi_4xi32 test_vredxor_4xi32(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_4xi32_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_4xi32 test_vredxor_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -18029,9 +18029,9 @@ __epi_4xi32 test_vredxor_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_2xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_2xi64 test_vredxor_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -18041,10 +18041,10 @@ __epi_2xi64 test_vredxor_2xi64(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_2xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_2xi64 test_vredxor_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -18054,9 +18054,9 @@ __epi_2xi64 test_vredxor_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_32xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_32xi8 test_vredxor_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned long int arg_2)
@@ -18066,10 +18066,10 @@ __epi_32xi8 test_vredxor_32xi8(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_32xi8_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv8i8.nxv32i8( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_32xi8 test_vredxor_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_32xi8 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -18079,9 +18079,9 @@ __epi_32xi8 test_vredxor_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_16xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_16xi16 test_vredxor_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -18091,10 +18091,10 @@ __epi_16xi16 test_vredxor_16xi16(__epi_16xi16 arg_0, __epi_16xi16 arg_1, unsigne
 // CHECK-O2-LABEL: @test_vredxor_16xi16_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_16xi16 test_vredxor_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -18104,9 +18104,9 @@ __epi_16xi16 test_vredxor_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, __
 // CHECK-O2-LABEL: @test_vredxor_8xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_8xi32 test_vredxor_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -18116,10 +18116,10 @@ __epi_8xi32 test_vredxor_8xi32(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_8xi32_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_8xi32 test_vredxor_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -18129,9 +18129,9 @@ __epi_8xi32 test_vredxor_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vredxor_4xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_4xi64 test_vredxor_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -18141,10 +18141,10 @@ __epi_4xi64 test_vredxor_4xi64(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vredxor_4xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_4xi64 test_vredxor_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -19894,7 +19894,7 @@ __epi_4xf64 test_vslide1up_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, unsi
 // CHECK-O2-LABEL: @test_vslidedown_8xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi8 test_vslidedown_8xi8(__epi_8xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -19914,7 +19914,7 @@ __epi_8xi8 test_vslidedown_8xi8_mask(__epi_8xi8 arg_0, __epi_8xi8 arg_1, unsigne
 // CHECK-O2-LABEL: @test_vslidedown_4xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi16 test_vslidedown_4xi16(__epi_4xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -19934,7 +19934,7 @@ __epi_4xi16 test_vslidedown_4xi16_mask(__epi_4xi16 arg_0, __epi_4xi16 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_2xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xi32 test_vslidedown_2xi32(__epi_2xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -19954,7 +19954,7 @@ __epi_2xi32 test_vslidedown_2xi32_mask(__epi_2xi32 arg_0, __epi_2xi32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_1xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv1i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv1i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_1xi64 test_vslidedown_1xi64(__epi_1xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -19974,7 +19974,7 @@ __epi_1xi64 test_vslidedown_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_2xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xf32 test_vslidedown_2xf32(__epi_2xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -19994,7 +19994,7 @@ __epi_2xf32 test_vslidedown_2xf32_mask(__epi_2xf32 arg_0, __epi_2xf32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_1xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv1f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv1f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_1xf64 test_vslidedown_1xf64(__epi_1xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20014,7 +20014,7 @@ __epi_1xf64 test_vslidedown_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_16xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv16i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv16i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_16xi8 test_vslidedown_16xi8(__epi_16xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20034,7 +20034,7 @@ __epi_16xi8 test_vslidedown_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_8xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi16 test_vslidedown_8xi16(__epi_8xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20054,7 +20054,7 @@ __epi_8xi16 test_vslidedown_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_4xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi32 test_vslidedown_4xi32(__epi_4xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20074,7 +20074,7 @@ __epi_4xi32 test_vslidedown_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_2xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xi64 test_vslidedown_2xi64(__epi_2xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20094,7 +20094,7 @@ __epi_2xi64 test_vslidedown_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_4xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xf32 test_vslidedown_4xf32(__epi_4xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20114,7 +20114,7 @@ __epi_4xf32 test_vslidedown_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_2xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv2f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xf64 test_vslidedown_2xf64(__epi_2xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20134,7 +20134,7 @@ __epi_2xf64 test_vslidedown_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_32xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv32i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv32i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_32xi8 test_vslidedown_32xi8(__epi_32xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20154,7 +20154,7 @@ __epi_32xi8 test_vslidedown_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_16xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv16i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv16i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_16xi16 test_vslidedown_16xi16(__epi_16xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20174,7 +20174,7 @@ __epi_16xi16 test_vslidedown_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1,
 // CHECK-O2-LABEL: @test_vslidedown_8xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi32 test_vslidedown_8xi32(__epi_8xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20194,7 +20194,7 @@ __epi_8xi32 test_vslidedown_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_4xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi64 test_vslidedown_4xi64(__epi_4xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20214,7 +20214,7 @@ __epi_4xi64 test_vslidedown_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_8xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv8f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xf32 test_vslidedown_8xf32(__epi_8xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20234,7 +20234,7 @@ __epi_8xf32 test_vslidedown_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, uns
 // CHECK-O2-LABEL: @test_vslidedown_4xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslidedown.nxv4f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xf64 test_vslidedown_4xf64(__epi_4xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20254,7 +20254,7 @@ __epi_4xf64 test_vslidedown_4xf64_mask(__epi_4xf64 arg_0, __epi_4xf64 arg_1, uns
 // CHECK-O2-LABEL: @test_vslideup_8xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi8 test_vslideup_8xi8(__epi_8xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20274,7 +20274,7 @@ __epi_8xi8 test_vslideup_8xi8_mask(__epi_8xi8 arg_0, __epi_8xi8 arg_1, unsigned
 // CHECK-O2-LABEL: @test_vslideup_4xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi16 test_vslideup_4xi16(__epi_4xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20294,7 +20294,7 @@ __epi_4xi16 test_vslideup_4xi16_mask(__epi_4xi16 arg_0, __epi_4xi16 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_2xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xi32 test_vslideup_2xi32(__epi_2xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20314,7 +20314,7 @@ __epi_2xi32 test_vslideup_2xi32_mask(__epi_2xi32 arg_0, __epi_2xi32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_1xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv1i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv1i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_1xi64 test_vslideup_1xi64(__epi_1xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20334,7 +20334,7 @@ __epi_1xi64 test_vslideup_1xi64_mask(__epi_1xi64 arg_0, __epi_1xi64 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_2xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xf32 test_vslideup_2xf32(__epi_2xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20354,7 +20354,7 @@ __epi_2xf32 test_vslideup_2xf32_mask(__epi_2xf32 arg_0, __epi_2xf32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_1xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv1f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv1f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_1xf64 test_vslideup_1xf64(__epi_1xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20374,7 +20374,7 @@ __epi_1xf64 test_vslideup_1xf64_mask(__epi_1xf64 arg_0, __epi_1xf64 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_16xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv16i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv16i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_16xi8 test_vslideup_16xi8(__epi_16xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20394,7 +20394,7 @@ __epi_16xi8 test_vslideup_16xi8_mask(__epi_16xi8 arg_0, __epi_16xi8 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_8xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi16 test_vslideup_8xi16(__epi_8xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20414,7 +20414,7 @@ __epi_8xi16 test_vslideup_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi16 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_4xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi32 test_vslideup_4xi32(__epi_4xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20434,7 +20434,7 @@ __epi_4xi32 test_vslideup_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_2xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xi64 test_vslideup_2xi64(__epi_2xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20454,7 +20454,7 @@ __epi_2xi64 test_vslideup_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi64 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_4xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xf32 test_vslideup_4xf32(__epi_4xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20474,7 +20474,7 @@ __epi_4xf32 test_vslideup_4xf32_mask(__epi_4xf32 arg_0, __epi_4xf32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_2xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv2f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_2xf64 test_vslideup_2xf64(__epi_2xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20494,7 +20494,7 @@ __epi_2xf64 test_vslideup_2xf64_mask(__epi_2xf64 arg_0, __epi_2xf64 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_32xi8(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv32i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv32i8.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_32xi8 test_vslideup_32xi8(__epi_32xi8 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20514,7 +20514,7 @@ __epi_32xi8 test_vslideup_32xi8_mask(__epi_32xi8 arg_0, __epi_32xi8 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_16xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv16i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv16i16.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_16xi16 test_vslideup_16xi16(__epi_16xi16 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20534,7 +20534,7 @@ __epi_16xi16 test_vslideup_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi16 arg_1, u
 // CHECK-O2-LABEL: @test_vslideup_8xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8i32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xi32 test_vslideup_8xi32(__epi_8xi32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20554,7 +20554,7 @@ __epi_8xi32 test_vslideup_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_4xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4i64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xi64 test_vslideup_4xi64(__epi_4xi64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20574,7 +20574,7 @@ __epi_4xi64 test_vslideup_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi64 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_8xf32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv8f32.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_8xf32 test_vslideup_8xf32(__epi_8xf32 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -20594,7 +20594,7 @@ __epi_8xf32 test_vslideup_8xf32_mask(__epi_8xf32 arg_0, __epi_8xf32 arg_1, unsig
 // CHECK-O2-LABEL: @test_vslideup_4xf64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.riscv.vslideup.nxv4f64.i64( undef, [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP0]]
 //
 __epi_4xf64 test_vslideup_4xf64(__epi_4xf64 arg_0, unsigned long int arg_1, unsigned long int arg_2)
@@ -25174,9 +25174,9 @@ __epi_8xi64 test_vwmulu_8xi64_mask(__epi_8xi64 arg_0, __epi_8xi32 arg_1, __epi_8
 // CHECK-O2-LABEL: @test_vwredsum_8xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_8xi16 test_vwredsum_8xi16(__epi_8xi8 arg_0, __epi_8xi16 arg_1, unsigned long int arg_2)
@@ -25186,10 +25186,10 @@ __epi_8xi16 test_vwredsum_8xi16(__epi_8xi8 arg_0, __epi_8xi16 arg_1, unsigned lo
 // CHECK-O2-LABEL: @test_vwredsum_8xi16_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv8i16( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_8xi16 test_vwredsum_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi8 arg_1, __epi_8xi16 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -25199,9 +25199,9 @@ __epi_8xi16 test_vwredsum_8xi16_mask(__epi_8xi16 arg_0, __epi_8xi8 arg_1, __epi_
 // CHECK-O2-LABEL: @test_vwredsum_4xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_4xi32 test_vwredsum_4xi32(__epi_4xi16 arg_0, __epi_4xi32 arg_1, unsigned long int arg_2)
@@ -25211,10 +25211,10 @@ __epi_4xi32 test_vwredsum_4xi32(__epi_4xi16 arg_0, __epi_4xi32 arg_1, unsigned l
 // CHECK-O2-LABEL: @test_vwredsum_4xi32_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv4i32( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_4xi32 test_vwredsum_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi16 arg_1, __epi_4xi32 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -25224,9 +25224,9 @@ __epi_4xi32 test_vwredsum_4xi32_mask(__epi_4xi32 arg_0, __epi_4xi16 arg_1, __epi
 // CHECK-O2-LABEL: @test_vwredsum_2xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_2xi64 test_vwredsum_2xi64(__epi_2xi32 arg_0, __epi_2xi64 arg_1, unsigned long int arg_2)
@@ -25236,10 +25236,10 @@ __epi_2xi64 test_vwredsum_2xi64(__epi_2xi32 arg_0, __epi_2xi64 arg_1, unsigned l
 // CHECK-O2-LABEL: @test_vwredsum_2xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv2i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_2xi64 test_vwredsum_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi32 arg_1, __epi_2xi64 arg_2, __epi_2xi1 arg_3, unsigned long int arg_4)
@@ -25249,9 +25249,9 @@ __epi_2xi64 test_vwredsum_2xi64_mask(__epi_2xi64 arg_0, __epi_2xi32 arg_1, __epi
 // CHECK-O2-LABEL: @test_vwredsum_16xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_16xi16 test_vwredsum_16xi16(__epi_16xi8 arg_0, __epi_16xi16 arg_1, unsigned long int arg_2)
@@ -25261,10 +25261,10 @@ __epi_16xi16 test_vwredsum_16xi16(__epi_16xi8 arg_0, __epi_16xi16 arg_1, unsigne
 // CHECK-O2-LABEL: @test_vwredsum_16xi16_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv16i16( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_16xi16 test_vwredsum_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi8 arg_1, __epi_16xi16 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -25274,9 +25274,9 @@ __epi_16xi16 test_vwredsum_16xi16_mask(__epi_16xi16 arg_0, __epi_16xi8 arg_1, __
 // CHECK-O2-LABEL: @test_vwredsum_8xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_8xi32 test_vwredsum_8xi32(__epi_8xi16 arg_0, __epi_8xi32 arg_1, unsigned long int arg_2)
@@ -25286,10 +25286,10 @@ __epi_8xi32 test_vwredsum_8xi32(__epi_8xi16 arg_0, __epi_8xi32 arg_1, unsigned l
 // CHECK-O2-LABEL: @test_vwredsum_8xi32_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv8i32( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_8xi32 test_vwredsum_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi16 arg_1, __epi_8xi32 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
@@ -25299,9 +25299,9 @@ __epi_8xi32 test_vwredsum_8xi32_mask(__epi_8xi32 arg_0, __epi_8xi16 arg_1, __epi
 // CHECK-O2-LABEL: @test_vwredsum_4xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_4xi64 test_vwredsum_4xi64(__epi_4xi32 arg_0, __epi_4xi64 arg_1, unsigned long int arg_2)
@@ -25311,10 +25311,10 @@ __epi_4xi64 test_vwredsum_4xi64(__epi_4xi32 arg_0, __epi_4xi64 arg_1, unsigned l
 // CHECK-O2-LABEL: @test_vwredsum_4xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv4i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_4xi64 test_vwredsum_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi32 arg_1, __epi_4xi64 arg_2, __epi_4xi1 arg_3, unsigned long int arg_4)
@@ -25324,9 +25324,9 @@ __epi_4xi64 test_vwredsum_4xi64_mask(__epi_4xi64 arg_0, __epi_4xi32 arg_1, __epi
 // CHECK-O2-LABEL: @test_vwredsum_32xi16(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv32i16( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_32xi16 test_vwredsum_32xi16(__epi_32xi8 arg_0, __epi_32xi16 arg_1, unsigned long int arg_2)
@@ -25336,10 +25336,10 @@ __epi_32xi16 test_vwredsum_32xi16(__epi_32xi8 arg_0, __epi_32xi16 arg_1, unsigne
 // CHECK-O2-LABEL: @test_vwredsum_32xi16_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv32i16( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv4i16.nxv32i16( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_32xi16 test_vwredsum_32xi16_mask(__epi_32xi16 arg_0, __epi_32xi8 arg_1, __epi_32xi16 arg_2, __epi_32xi1 arg_3, unsigned long int arg_4)
@@ -25349,9 +25349,9 @@ __epi_32xi16 test_vwredsum_32xi16_mask(__epi_32xi16 arg_0, __epi_32xi8 arg_1, __
 // CHECK-O2-LABEL: @test_vwredsum_16xi32(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv16i32( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_16xi32 test_vwredsum_16xi32(__epi_16xi16 arg_0, __epi_16xi32 arg_1, unsigned long int arg_2)
@@ -25361,10 +25361,10 @@ __epi_16xi32 test_vwredsum_16xi32(__epi_16xi16 arg_0, __epi_16xi32 arg_1, unsign
 // CHECK-O2-LABEL: @test_vwredsum_16xi32_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv16i32( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv2i32.nxv16i32( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_16xi32 test_vwredsum_16xi32_mask(__epi_16xi32 arg_0, __epi_16xi16 arg_1, __epi_16xi32 arg_2, __epi_16xi1 arg_3, unsigned long int arg_4)
@@ -25374,9 +25374,9 @@ __epi_16xi32 test_vwredsum_16xi32_mask(__epi_16xi32 arg_0, __epi_16xi16 arg_1, _
 // CHECK-O2-LABEL: @test_vwredsum_8xi64(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[ARG_1:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv8i64( [[ARG_1:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( undef, [[ARG_0:%.*]], [[TMP0]], i64 [[ARG_2:%.*]])
-// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[TMP1]], i64 0)
+// CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[TMP1]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP2]]
 //
 __epi_8xi64 test_vwredsum_8xi64(__epi_8xi32 arg_0, __epi_8xi64 arg_1, unsigned long int arg_2)
@@ -25386,10 +25386,10 @@ __epi_8xi64 test_vwredsum_8xi64(__epi_8xi32 arg_0, __epi_8xi64 arg_1, unsigned l
 // CHECK-O2-LABEL: @test_vwredsum_8xi64_mask(
 // CHECK-O2-NEXT: entry:
-// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[ARG_0:%.*]], i64 0)
-// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[ARG_2:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv8i64( [[ARG_0:%.*]], i64 0)
+// CHECK-O2-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv1i64.nxv8i64( [[ARG_2:%.*]], i64 0)
 // CHECK-O2-NEXT: [[TMP2:%.*]] = tail call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[TMP0]], [[ARG_1:%.*]], [[TMP1]], [[ARG_3:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[TMP2]], i64 0)
+// CHECK-O2-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[TMP2]], i64 0)
 // CHECK-O2-NEXT: ret [[TMP3]]
 //
 __epi_8xi64 test_vwredsum_8xi64_mask(__epi_8xi64 arg_0, __epi_8xi32 arg_1, __epi_8xi64 arg_2, __epi_8xi1 arg_3, unsigned long int arg_4)
diff --git a/clang/test/CodeGen/hwasan-globals.cpp b/clang/test/CodeGen/hwasan-globals.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39460d3297b4802331777377452f82c42f14fc37
--- /dev/null
+++ b/clang/test/CodeGen/hwasan-globals.cpp
@@ -0,0 +1,60 @@
+// REQUIRES: aarch64-registered-target
+
+// RUN: %clang_cc1 -include %S/Inputs/sanitizer-extra-source.cpp \
+// RUN: -fsanitize-ignorelist=%S/Inputs/sanitizer-ignorelist-global.txt \
+// RUN: -fsanitize=hwaddress -emit-llvm -triple aarch64-linux-android31 -o -\
+// RUN: %s | FileCheck %s
+
+// RUN: %clang_cc1 -include %S/Inputs/sanitizer-extra-source.cpp \
+// RUN: -fsanitize-ignorelist=%S/Inputs/sanitizer-ignorelist-src.txt \
+// RUN: -fsanitize=hwaddress -emit-llvm -triple aarch64-linux-android31 -o -\
+// RUN: %s | FileCheck %s --check-prefix=IGNORELIST
+
+int global;
+int __attribute__((no_sanitize("hwaddress"))) attributed_global;
+int __attribute__((disable_sanitizer_instrumentation)) disable_instrumentation_global;
+int ignorelisted_global;
+
+void func() {
+ static int static_var = 0;
+ const char *literal = "Hello, world!";
+}
+
+// CHECK: @{{.*}}attributed_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// CHECK: @{{.*}}disable_instrumentation_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// CHECK: @{{.*}}ignorelisted_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// CHECK: @{{.*}}extra_global{{.*}}.hwasan{{.*}} =
+// CHECK: @{{.*}}global{{.*}}.hwasan{{.*}} =
+// CHECK: @{{.*}}static_var{{.*}}.hwasan{{.*}} =
+// CHECK: @{{.*}}.hwasan{{.*}} = {{.*}} c"Hello, world!\00"
+
+// CHECK: !llvm.asan.globals = !{![[EXTRA_GLOBAL:[0-9]+]], ![[GLOBAL:[0-9]+]], ![[ATTR_GLOBAL:[0-9]+]], ![[DISABLE_INSTR_GLOBAL:[0-9]+]], ![[IGNORELISTED_GLOBAL:[0-9]+]], ![[STATIC_VAR:[0-9]+]], ![[LITERAL:[0-9]+]]}
+// CHECK: ![[EXTRA_GLOBAL]] = !{{{.*}} ![[EXTRA_GLOBAL_LOC:[0-9]+]], !"extra_global", i1 false, i1 false}
+// CHECK: ![[EXTRA_GLOBAL_LOC]] = !{!"{{.*}}extra-source.cpp", i32 1, i32 5}
+// CHECK: ![[GLOBAL]] = !{{{.*}} ![[GLOBAL_LOC:[0-9]+]], !"global", i1 false, i1 false}
+// CHECK: ![[GLOBAL_LOC]] = !{!"{{.*}}hwasan-globals.cpp", i32 13, i32 5}
+// CHECK: ![[ATTR_GLOBAL]] = !{{{.*attributed_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[DISABLE_INSTR_GLOBAL]] = !{{{.*disable_instrumentation_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[IGNORELISTED_GLOBAL]] = !{{{.*ignorelisted_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[STATIC_VAR]] = !{{{.*}} ![[STATIC_LOC:[0-9]+]], !"static_var", i1 false, i1 false}
+// CHECK: ![[STATIC_LOC]] = !{!"{{.*}}hwasan-globals.cpp", i32 19, i32 14}
+// CHECK: ![[LITERAL]] = !{{{.*}} ![[LITERAL_LOC:[0-9]+]], !"", i1 false, i1 false}
+// CHECK: ![[LITERAL_LOC]] = !{!"{{.*}}hwasan-globals.cpp", i32 20, i32 25}
+
+// IGNORELIST: @{{.*}}global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}}attributed_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}}disable_instrumentation_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}}ignorelisted_globa{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}}static_var{{.*}} ={{.*}} global {{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}} = {{.*}} c"Hello, world!\00"{{.*}}, no_sanitize_hwaddress
+// IGNORELIST: @{{.*}}extra_global{{.*}}.hwasan{{.*}} =
+
+// IGNORELIST: !llvm.asan.globals = !{![[EXTRA_GLOBAL:[0-9]+]], ![[GLOBAL:[0-9]+]], ![[ATTR_GLOBAL:[0-9]+]], ![[DISABLE_INSTR_GLOBAL:[0-9]+]], ![[IGNORELISTED_GLOBAL:[0-9]+]], ![[STATIC_VAR:[0-9]+]], ![[LITERAL:[0-9]+]]}
+// IGNORELIST: ![[EXTRA_GLOBAL]] = !{{{.*}} ![[EXTRA_GLOBAL_LOC:[0-9]+]], !"extra_global", i1 false, i1 false}
+// IGNORELIST: ![[EXTRA_GLOBAL_LOC]] = !{!"{{.*}}extra-source.cpp", i32 1, i32 5}
+// IGNORELIST: ![[GLOBAL]] = !{{{.*}} null, null, i1 false, i1 true}
+// IGNORELIST: ![[ATTR_GLOBAL]] = !{{{.*attributed_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[DISABLE_INSTR_GLOBAL]] = !{{{.*disable_instrumentation_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[IGNORELISTED_GLOBAL]] = !{{{.*ignorelisted_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[STATIC_VAR]] = !{{{.*}} null, null, i1 false, i1 true}
+// IGNORELIST: ![[LITERAL]] = !{{{.*}} null, null, i1 false, i1 true}
diff --git a/clang/test/CodeGen/memtag-globals.cpp b/clang/test/CodeGen/memtag-globals.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6deb0e2ad0bdc3650e648552e8ed3bb01c17564d
--- /dev/null
+++ b/clang/test/CodeGen/memtag-globals.cpp
@@ -0,0 +1,62 @@
+// RUN: %clang_cc1 -include %S/Inputs/sanitizer-extra-source.cpp \
+// RUN: -fsanitize-ignorelist=%S/Inputs/sanitizer-ignorelist-global.txt \
+// RUN: -fsanitize=memtag-globals -emit-llvm -o - %s | FileCheck %s
+
+// RUN: %clang_cc1 -include %S/Inputs/sanitizer-extra-source.cpp \
+// RUN: -fsanitize-ignorelist=%S/Inputs/sanitizer-ignorelist-src.txt \
+// RUN: -fsanitize=memtag-globals -emit-llvm -o - %s | \
+// RUN: FileCheck %s --check-prefix=IGNORELIST
+
+int global;
+int __attribute__((no_sanitize("memtag"))) attributed_global;
+int __attribute__((disable_sanitizer_instrumentation)) disable_instrumentation_global;
+int ignorelisted_global;
+
+void func() {
+ static int static_var = 0;
+ const char *literal = "Hello, world!";
+}
+
+// CHECK: @{{.*}}extra_global{{.*}} =
+// CHECK-NOT: no_sanitize_memtag
+// CHECK: @{{.*}}global{{.*}} =
+// CHECK-NOT: no_sanitize_memtag
+// CHECK: @{{.*}}attributed_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// CHECK: @{{.*}}disable_instrumentation_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// CHECK: @{{.*}}ignorelisted_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// CHECK: @{{.*}}static_var{{.*}} =
+// CHECK-NOT: no_sanitize_memtag
+// CHECK: @{{.*}} = {{.*}} c"Hello, world!\00"
+// CHECK-NOT: no_sanitize_memtag
+
+// CHECK: !llvm.asan.globals = !{![[EXTRA_GLOBAL:[0-9]+]], ![[GLOBAL:[0-9]+]], ![[ATTR_GLOBAL:[0-9]+]], ![[DISABLE_INSTR_GLOBAL:[0-9]+]], ![[IGNORELISTED_GLOBAL:[0-9]+]], ![[STATIC_VAR:[0-9]+]], ![[LITERAL:[0-9]+]]}
+// CHECK: ![[EXTRA_GLOBAL]] = !{{{.*}} ![[EXTRA_GLOBAL_LOC:[0-9]+]], !"extra_global", i1 false, i1 false}
+// CHECK: ![[EXTRA_GLOBAL_LOC]] = !{!"{{.*}}extra-source.cpp", i32 1, i32 5}
+// CHECK: ![[GLOBAL]] = !{{{.*}} ![[GLOBAL_LOC:[0-9]+]], !"global", i1 false, i1 false}
+// CHECK: ![[GLOBAL_LOC]] = !{!"{{.*}}memtag-globals.cpp", i32 10, i32 5}
+// CHECK: ![[ATTR_GLOBAL]] = !{{{.*attributed_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[DISABLE_INSTR_GLOBAL]] = !{{{.*disable_instrumentation_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[IGNORELISTED_GLOBAL]] = !{{{.*ignorelisted_global.*}}, null, null, i1 false, i1 true}
+// CHECK: ![[STATIC_VAR]] = !{{{.*}} ![[STATIC_LOC:[0-9]+]], !"static_var", i1 false, i1 false}
+// CHECK: ![[STATIC_LOC]] = !{!"{{.*}}memtag-globals.cpp", i32 16, i32 14}
+// CHECK: ![[LITERAL]] = !{{{.*}} ![[LITERAL_LOC:[0-9]+]], !"", i1 false, i1 false}
+// CHECK: ![[LITERAL_LOC]] = !{!"{{.*}}memtag-globals.cpp", i32 17, i32 25}
+
+// IGNORELIST: @{{.*}}extra_global{{.*}} ={{.*}} global
+// IGNORELIST-NOT: no_sanitize_memtag
+// IGNORELIST: @{{.*}}global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// IGNORELIST: @{{.*}}attributed_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// IGNORELIST: @{{.*}}disable_instrumentation_global{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// IGNORELIST: @{{.*}}ignorelisted_globa{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// IGNORELIST: @{{.*}}static_var{{.*}} ={{.*}} global {{.*}}, no_sanitize_memtag
+// IGNORELIST: @{{.*}} = {{.*}} c"Hello, world!\00"{{.*}}, no_sanitize_memtag
+
+// IGNORELIST: !llvm.asan.globals = !{![[EXTRA_GLOBAL:[0-9]+]], ![[GLOBAL:[0-9]+]], ![[ATTR_GLOBAL:[0-9]+]], ![[DISABLE_INSTR_GLOBAL:[0-9]+]], ![[IGNORELISTED_GLOBAL:[0-9]+]], ![[STATIC_VAR:[0-9]+]], ![[LITERAL:[0-9]+]]}
+// IGNORELIST: ![[EXTRA_GLOBAL]] = !{{{.*}} ![[EXTRA_GLOBAL_LOC:[0-9]+]], !"extra_global", i1 false, i1 false}
+// IGNORELIST: ![[EXTRA_GLOBAL_LOC]] = !{!"{{.*}}extra-source.cpp", i32 1, i32 5}
+// IGNORELIST: ![[GLOBAL]] = !{{{.*}} null, null, i1 false, i1 true}
+// IGNORELIST: ![[ATTR_GLOBAL]] = !{{{.*attributed_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[DISABLE_INSTR_GLOBAL]] = !{{{.*disable_instrumentation_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[IGNORELISTED_GLOBAL]] = !{{{.*ignorelisted_global.*}}, null, null, i1 false, i1 true}
+// IGNORELIST: ![[STATIC_VAR]] = !{{{.*}} null, null, i1 false, i1 true}
+// IGNORELIST: ![[LITERAL]] = !{{{.*}} null, null, i1 false, i1 true}
diff --git a/clang/test/CodeGen/object-size-flex-array.c b/clang/test/CodeGen/object-size-flex-array.c
new file mode 100644
index 0000000000000000000000000000000000000000..9611485bef7e25f9fa4022e93dd9cc403e794292
--- /dev/null
+++ b/clang/test/CodeGen/object-size-flex-array.c
@@ -0,0 +1,106 @@
+// RUN: %clang -fstrict-flex-arrays=3 -target x86_64-apple-darwin -S -emit-llvm %s -o - 2>&1 | FileCheck --check-prefixes=CHECK,CHECK-STRICT-3 %s
+// RUN: %clang -fstrict-flex-arrays=2 -target x86_64-apple-darwin -S -emit-llvm %s -o - 2>&1 | FileCheck --check-prefixes=CHECK,CHECK-STRICT-2 %s
+// RUN: %clang -fstrict-flex-arrays=1 -target x86_64-apple-darwin -S -emit-llvm %s -o - 2>&1 | FileCheck --check-prefixes=CHECK,CHECK-STRICT-1 %s
+// RUN: %clang -fstrict-flex-arrays=0 -target x86_64-apple-darwin -S -emit-llvm %s -o - 2>&1 | FileCheck --check-prefixes=CHECK,CHECK-STRICT-0 %s
+
+#define OBJECT_SIZE_BUILTIN __builtin_object_size
+
+typedef struct {
+ float f;
+ double c[];
+} foo_t;
+
+typedef struct {
+ float f;
+ double c[0];
+} foo0_t;
+
+typedef struct {
+ float f;
+ double c[1];
+} foo1_t;
+
+typedef struct {
+ float f;
+ double c[2];
+} foo2_t;
+
+// CHECK-LABEL: @bar
+unsigned bar(foo_t *f) {
+ // CHECK-STRICT-0: ret i32 %
+ // CHECK-STRICT-1: ret i32 %
+ // CHECK-STRICT-2: ret i32 %
+ // CHECK-STRICT-3: ret i32 %
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// CHECK-LABEL: @bar0
+unsigned bar0(foo0_t *f) {
+ // CHECK-STRICT-0: ret i32 %
+ // CHECK-STRICT-1: ret i32 %
+ // CHECK-STRICT-2: ret i32 %
+ // CHECK-STRICT-3: ret i32 0
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// CHECK-LABEL: @bar1
+unsigned bar1(foo1_t *f) {
+ // CHECK-STRICT-0: ret i32 %
+ // CHECK-STRICT-1: ret i32 %
+ // CHECK-STRICT-2: ret i32 8
+ // CHECK-STRICT-3: ret i32 8
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// CHECK-LABEL: @bar2
+unsigned bar2(foo2_t *f) {
+ // CHECK-STRICT-0: ret i32 %
+ // CHECK-STRICT-1: ret i32 16
+ // CHECK-STRICT-2: ret i32 16
+ // CHECK-STRICT-3: ret i32 16
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// Also checks for non-trailing flex-array like members
+
+typedef struct {
+ double c[0];
+ float f;
+} foofoo0_t;
+
+typedef struct {
+ double c[1];
+ float f;
+} foofoo1_t;
+
+typedef struct {
+ double c[2];
+ float f;
+} foofoo2_t;
+
+// CHECK-LABEL: @babar0
+unsigned babar0(foofoo0_t *f) {
+ // CHECK-STRICT-0: ret i32 0
+ // CHECK-STRICT-1: ret i32 0
+ // CHECK-STRICT-2: ret i32 0
+ // CHECK-STRICT-3: ret i32 0
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// CHECK-LABEL: @babar1
+unsigned babar1(foofoo1_t *f) {
+ // CHECK-STRICT-0: ret i32 8
+ // CHECK-STRICT-1: ret i32 8
+ // CHECK-STRICT-2: ret i32 8
+ // CHECK-STRICT-3: ret i32 8
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
+
+// CHECK-LABEL: @babar2
+unsigned babar2(foofoo2_t *f) {
+ // CHECK-STRICT-0: ret i32 16
+ // CHECK-STRICT-1: ret i32 16
+ // CHECK-STRICT-2: ret i32 16
+ // CHECK-STRICT-3: ret i32 16
+ return OBJECT_SIZE_BUILTIN(f->c, 1);
+}
diff --git a/clang/test/CodeGen/pragma-msvc-optimize.c b/clang/test/CodeGen/pragma-msvc-optimize.c
new file mode 100644
index 0000000000000000000000000000000000000000..17ba40de0a304df5b447eaabffc9256856e95e37
--- /dev/null
+++ b/clang/test/CodeGen/pragma-msvc-optimize.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -O2 -emit-llvm -fms-extensions -o - %s | FileCheck %s
+
+#pragma optimize("", off)
+
+// CHECK: define{{.*}} void @f0(){{.*}} #[[OPTNONE:[0-9]+]]
+void f0() {}
+
+// CHECK: define{{.*}} void @f1(){{.*}} #[[OPTNONE]]
+void f1() {}
+
+#pragma optimize("", on)
+
+// CHECK: define{{.*}} void @f2(){{.*}} #[[NO_OPTNONE:[0-9]+]]
+void f2() {}
+
+// CHECK: define{{.*}} void @f3(){{.*}} #[[NO_OPTNONE]]
+void f3() {}
+
+// CHECK: attributes #[[OPTNONE]] = {{{.*}}optnone{{.*}}}
+// CHECK-NOT: attributes #[[NO_OPTNONE]] = {{{.*}}optnone{{.*}}}
diff --git a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
index 96892286fd75e2484213088e902859ace9e63998..946927d88a1ee1f69d5b3aac0fcd137af631e2fd 100644
--- a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
+++ b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
@@ -9,7 +9,7 @@
 // GFX90A-CAS: A compare and swap loop was generated for an atomic fadd operation at system memory scope
 // GFX90A-CAS-LABEL: _Z14atomic_add_casPf
-// GFX90A-CAS: flat_atomic_cmpswap v0, v[2:3], v[4:5] glc
+// GFX90A-CAS: flat_atomic_cmpswap
 // GFX90A-CAS: s_cbranch_execnz
 __device__ float atomic_add_cas(float *p) {
 return __atomic_fetch_add(p, 1.0f, memory_order_relaxed);
diff --git a/clang/test/CodeGenCUDA/kernel-arg-name-metadata.cu b/clang/test/CodeGenCUDA/kernel-arg-name-metadata.cu
new file mode 100644
index 0000000000000000000000000000000000000000..f4b00757da0afa6dc8e23fb4b54765691c555e7c
--- /dev/null
+++ b/clang/test/CodeGenCUDA/kernel-arg-name-metadata.cu
@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fhip-kernel-arg-name \
+// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
+// RUN: | FileCheck %s
+
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
+// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
+// RUN: | FileCheck -check-prefix=NEG %s
+
+#include "Inputs/cuda.h"
+
+// CHECK: define{{.*}} amdgpu_kernel void @_Z6kerneliPf({{.*}} !kernel_arg_name [[MD:![0-9]+]]
+// NEG-NOT: define{{.*}} amdgpu_kernel void @_Z6kerneliPf({{.*}} !kernel_arg_name
+__global__ void kernel(int arg1, float *arg2) {
+}
+
+// CHECK: [[MD]] = !{!"arg1", !"arg2"}
diff --git a/clang/test/CodeGenCXX/visibility.cpp b/clang/test/CodeGenCXX/visibility.cpp
index aff6554282caf57f0de5b5db9b0d6fee8a58371f..d54aa2da033ac6b395b9b06056dbab41bd33b9f7 100644
--- a/clang/test/CodeGenCXX/visibility.cpp
+++ b/clang/test/CodeGenCXX/visibility.cpp
@@ -118,6 +118,8 @@ namespace test48 {
 // CHECK-HIDDEN: @_ZN6Test143varE = external global
 // CHECK: @_ZN6Test154TempINS_1AEE5Inner6bufferE = external global [0 x i8]
 // CHECK-HIDDEN: @_ZN6Test154TempINS_1AEE5Inner6bufferE = external global [0 x i8]
+// CHECK: @_ZTVN6test701BE = external hidden unnamed_addr constant { [5 x ptr] }, align 8
+// CHECK: @_ZTTN6test701BE = external hidden unnamed_addr constant [2 x ptr], align 8
 
 namespace test27 {
 template
@@ -1317,3 +1319,16 @@ namespace test69 {
 // CHECK-LABEL: define void @_ZN6test693foo1fEv
 // CHECK-HIDDEN-LABEL: define hidden void @_ZN6test693foo1fEv
 }
+
+namespace test70 {
+ // Make sure both the vtable and VTT declarations are marked "hidden"
+ class HIDDEN A {
+ virtual void a();
+ };
+ class HIDDEN B : virtual A {
+ void a() override;
+ ~B();
+ };
+ B::~B() {}
+ // Check lines at top of file.
+}
diff --git a/clang/test/CodeGenHLSL/basic_types.hlsl b/clang/test/CodeGenHLSL/basic_types.hlsl
index fcb0815c3af57b938ddfef2c2f201d3fc3f73920..f76e134743e5455a8484620c659fa4b38be3ebc9 100644
--- a/clang/test/CodeGenHLSL/basic_types.hlsl
+++ b/clang/test/CodeGenHLSL/basic_types.hlsl
@@ -2,27 +2,27 @@
 // FIXME: check 16bit types once enable-16bit-types is ready.
-// CHECK:@uint_Val = global i32 0, align 4 -// CHECK:@uint64_t_Val = global i64 0, align 8 -// CHECK:@int64_t_Val = global i64 0, align 8 -// CHECK:@int2_Val = global <2 x i32> zeroinitializer, align 8 -// CHECK:@int3_Val = global <3 x i32> zeroinitializer, align 16 -// CHECK:@int4_Val = global <4 x i32> zeroinitializer, align 16 -// CHECK:@uint2_Val = global <2 x i32> zeroinitializer, align 8 -// CHECK:@uint3_Val = global <3 x i32> zeroinitializer, align 16 -// CHECK:@uint4_Val = global <4 x i32> zeroinitializer, align 16 -// CHECK:@int64_t2_Val = global <2 x i64> zeroinitializer, align 16 -// CHECK:@int64_t3_Val = global <3 x i64> zeroinitializer, align 32 -// CHECK:@int64_t4_Val = global <4 x i64> zeroinitializer, align 32 -// CHECK:@uint64_t2_Val = global <2 x i64> zeroinitializer, align 16 -// CHECK:@uint64_t3_Val = global <3 x i64> zeroinitializer, align 32 -// CHECK:@uint64_t4_Val = global <4 x i64> zeroinitializer, align 32 -// CHECK:@float2_Val = global <2 x float> zeroinitializer, align 8 -// CHECK:@float3_Val = global <3 x float> zeroinitializer, align 16 -// CHECK:@float4_Val = global <4 x float> zeroinitializer, align 16 -// CHECK:@double2_Val = global <2 x double> zeroinitializer, align 16 -// CHECK:@double3_Val = global <3 x double> zeroinitializer, align 32 -// CHECK:@double4_Val = global <4 x double> zeroinitializer, align 32 +// CHECK:"?uint_Val@@3IA" = global i32 0, align 4 +// CHECK:"?uint64_t_Val@@3KA" = global i64 0, align 8 +// CHECK:"?int64_t_Val@@3JA" = global i64 0, align 8 +// CHECK:"?int2_Val@@3T?$__vector@H$01@__clang@@A" = global <2 x i32> zeroinitializer, align 8 +// CHECK:"?int3_Val@@3T?$__vector@H$02@__clang@@A" = global <3 x i32> zeroinitializer, align 16 +// CHECK:"?int4_Val@@3T?$__vector@H$03@__clang@@A" = global <4 x i32> zeroinitializer, align 16 +// CHECK:"?uint2_Val@@3T?$__vector@I$01@__clang@@A" = global <2 x i32> zeroinitializer, align 8 +// CHECK:"?uint3_Val@@3T?$__vector@I$02@__clang@@A" = global <3 x i32> zeroinitializer, align 16 +// CHECK:"?uint4_Val@@3T?$__vector@I$03@__clang@@A" = global <4 x i32> zeroinitializer, align 16 +// CHECK:"?int64_t2_Val@@3T?$__vector@J$01@__clang@@A" = global <2 x i64> zeroinitializer, align 16 +// CHECK:"?int64_t3_Val@@3T?$__vector@J$02@__clang@@A" = global <3 x i64> zeroinitializer, align 32 +// CHECK:"?int64_t4_Val@@3T?$__vector@J$03@__clang@@A" = global <4 x i64> zeroinitializer, align 32 +// CHECK:"?uint64_t2_Val@@3T?$__vector@K$01@__clang@@A" = global <2 x i64> zeroinitializer, align 16 +// CHECK:"?uint64_t3_Val@@3T?$__vector@K$02@__clang@@A" = global <3 x i64> zeroinitializer, align 32 +// CHECK:"?uint64_t4_Val@@3T?$__vector@K$03@__clang@@A" = global <4 x i64> zeroinitializer, align 32 +// CHECK:"?float2_Val@@3T?$__vector@M$01@__clang@@A" = global <2 x float> zeroinitializer, align 8 +// CHECK:"?float3_Val@@3T?$__vector@M$02@__clang@@A" = global <3 x float> zeroinitializer, align 16 +// CHECK:"?float4_Val@@3T?$__vector@M$03@__clang@@A" = global <4 x float> zeroinitializer, align 16 +// CHECK:"?double2_Val@@3T?$__vector@N$01@__clang@@A" = global <2 x double> zeroinitializer, align 16 +// CHECK:"?double3_Val@@3T?$__vector@N$02@__clang@@A" = global <3 x double> zeroinitializer, align 32 +// CHECK:"?double4_Val@@3T?$__vector@N$03@__clang@@A" = global <4 x double> zeroinitializer, align 32 #define TYPE_DECL(T) T T##_Val diff --git a/clang/test/CodeGenHLSL/half.hlsl b/clang/test/CodeGenHLSL/half.hlsl new file mode 100644 index 0000000000000000000000000000000000000000..90ef77f4bec54d83e4e15ecc400b3d6779835e8c --- 
/dev/null
+++ b/clang/test/CodeGenHLSL/half.hlsl
@@ -0,0 +1,15 @@
+// RUN: %clang_dxc -Tlib_6_7 -fcgl -Fo - %s | FileCheck %s --check-prefix=FLOAT
+// RUN: %clang_dxc -Tlib_6_7 -enable-16bit-types -fcgl -Fo - %s | FileCheck %s --check-prefix=HALF
+
+// Make sure float is used when enable-16bit-types is not set.
+// FLOAT:define {{.*}}float @"?foo@@YA$halff@$halff@0@Z"(float{{[^,]+}}, float{{[^,)]+}})
+// FLOAT-NOT:half
+// FLOAT:ret float %
+
+// Make sure half is used when enable-16bit-types is set.
+// HALF:define {{.*}}half @"?foo@@YA$f16@$f16@0@Z"(half{{[^,]+}}, half{{[^,)]+}})
+// HALF-NOT:float
+// HALF:ret half %
+half foo(half a, half b) {
+  return a+b;
+}
diff --git a/clang/test/CodeGenObjC/ubsan-array-bounds.m b/clang/test/CodeGenObjC/ubsan-array-bounds.m
index 38d1eb310d21eadf3c27b346144b66e5b602865a..ebb7517adca98bd73d01f55b608a900f96137c53 100644
--- a/clang/test/CodeGenObjC/ubsan-array-bounds.m
+++ b/clang/test/CodeGenObjC/ubsan-array-bounds.m
@@ -14,46 +14,3 @@ char test_FlexibleArray1(FlexibleArray1 *FA1) {
   return FA1->chars[1];
   // CHECK: }
 }
-
-@interface FlexibleArray2 {
-@public
-  char chars[0];
-}
-@end
-@implementation FlexibleArray2 {
-@public
-  char chars2[0];
-}
-@end
-
-// CHECK-LABEL: test_FlexibleArray2_1
-char test_FlexibleArray2_1(FlexibleArray2 *FA2) {
-  // CHECK: !nosanitize
-  return FA2->chars[1];
-  // CHECK: }
-}
-
-// CHECK-LABEL: test_FlexibleArray2_2
-char test_FlexibleArray2_2(FlexibleArray2 *FA2) {
-  // CHECK-NOT: !nosanitize
-  return FA2->chars2[1];
-  // CHECK: }
-}
-
-@interface FlexibleArray3 {
-@public
-  char chars[0];
-}
-@end
-@implementation FlexibleArray3 {
-@public
-  int i;
-}
-@end
-
-// CHECK-LABEL: test_FlexibleArray3
-char test_FlexibleArray3(FlexibleArray3 *FA3) {
-  // CHECK: !nosanitize
-  return FA3->chars[1];
-  // CHECK: }
-}
diff --git a/clang/test/Driver/compilation_database_multiarch.c b/clang/test/Driver/compilation_database_multiarch.c
index 8ea3a457f11cc298df2d02c393bf29efa25de7ff..1540a8d29ec5cfb1c40a80971fbc0e8ccdb774b8 100644
--- a/clang/test/Driver/compilation_database_multiarch.c
+++ b/clang/test/Driver/compilation_database_multiarch.c
@@ -10,6 +10,6 @@
 
 // RUN: FileCheck --input-file=%t/compilation_database.json %s
 
-// CHECK: { "directory": "{{.*}}", "file": "{{.*}}", "output": "[[OUTPUT_X86_64:.*]]", "arguments": [{{.*}}, "-o", "[[OUTPUT_X86_64]]", {{.*}} "--target=x86_64-apple-macosx12.0.0"]},
-// CHECK-NEXT: { "directory": "{{.*}}", "file": "{{.*}}", "output": "[[OUTPUT_ARM64:.*]]", "arguments": [{{.*}}, "-o", "[[OUTPUT_ARM64]]", {{.*}} "--target=arm64-apple-macosx12.0.0"]},
-// CHECK-NEXT: EOF
+// CHECK-DAG: { "directory": "{{.*}}", "file": "{{.*}}", "output": "[[OUTPUT_X86_64:.*]]", "arguments": [{{.*}}, "-o", "[[OUTPUT_X86_64]]", {{.*}} "--target=x86_64-apple-macosx12.0.0"]},
+// CHECK-DAG: { "directory": "{{.*}}", "file": "{{.*}}", "output": "[[OUTPUT_ARM64:.*]]", "arguments": [{{.*}}, "-o", "[[OUTPUT_ARM64]]", {{.*}} "--target=arm64-apple-macosx12.0.0"]},
+// CHECK: EOF
diff --git a/clang/test/Driver/coverage-ld.c b/clang/test/Driver/coverage-ld.c
index 7d6a48be8b0852630e7c489809ff803cd62f3929..edfe272fbb525a88a53627d144602b9b45655ef1 100644
--- a/clang/test/Driver/coverage-ld.c
+++ b/clang/test/Driver/coverage-ld.c
@@ -35,6 +35,15 @@
 // CHECK-FREEBSD-X86-64: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}"
 // CHECK-FREEBSD-X86-64: "{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}freebsd{{/|\\\\}}libclang_rt.profile-x86_64.a"
 //
+// RUN: %clang -### %s 2>&1 \
+// RUN: --target=x86_64-unknown-openbsd --coverage -fuse-ld=ld \
+// RUN: 
-resource-dir=%S/Inputs/resource_dir \ +// RUN: --sysroot=%S/Inputs/basic_openbsd_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENBSD-X86-64 %s + +// CHECK-OPENBSD-X86-64: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}" +// CHECK-OPENBSD-X86-64: "{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}openbsd{{/|\\\\}}libclang_rt.profile-x86_64.a" + // RUN: %clang -### %s 2>&1 \ // RUN: --target=arm-linux-androideabi --coverage -fuse-ld=ld \ // RUN: -resource-dir=%S/Inputs/resource_dir \ diff --git a/clang/test/Driver/cuda-openmp-driver.cu b/clang/test/Driver/cuda-openmp-driver.cu index b27195da2fb154f149c3f6d99bc76b774a844838..ded2a758579ad150d278763f4bfbe8e869d48c3a 100644 --- a/clang/test/Driver/cuda-openmp-driver.cu +++ b/clang/test/Driver/cuda-openmp-driver.cu @@ -5,13 +5,11 @@ // RUN: --offload-new-driver --offload-arch=sm_35 --offload-arch=sm_70 %s 2>&1 \ // RUN: | FileCheck -check-prefix BINDINGS %s -// BINDINGS: "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[PTX_SM_35:.+]]" +// BINDINGS: "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[PTX_SM_35:.+]]" // BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX_SM_35]]"], output: "[[CUBIN_SM_35:.+]]" -// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Linker", inputs: ["[[CUBIN_SM_35]]", "[[PTX_SM_35]]"], output: "[[FATBIN_SM_35:.+]]" // BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT]]"], output: "[[PTX_SM_70:.+]]" // BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX_SM_70:.+]]"], output: "[[CUBIN_SM_70:.+]]" -// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Linker", inputs: ["[[CUBIN_SM_70]]", "[[PTX_SM_70:.+]]"], output: "[[FATBIN_SM_70:.+]]" -// BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "Offload::Packager", inputs: ["[[FATBIN_SM_35]]", "[[FATBIN_SM_70]]"], output: "[[BINARY:.+]]" +// BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "Offload::Packager", inputs: ["[[CUBIN_SM_35]]", "[[CUBIN_SM_70]]"], output: "[[BINARY:.+]]" // BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[INPUT]]", "[[BINARY]]"], output: "[[HOST_OBJ:.+]]" // BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[HOST_OBJ]]"], output: "a.out" @@ -31,7 +29,6 @@ // BINDINGS-DEVICE: # "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[PTX:.+]]" // BINDINGS-DEVICE: # "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX]]"], output: "[[CUBIN:.+]]" -// BINDINGS-DEVICE: # "nvptx64-nvidia-cuda" - "NVPTX::Linker", inputs: ["[[CUBIN]]", "[[PTX]]"], output: "{{.*}}.fatbin" // RUN: %clang -### -target x86_64-linux-gnu -nocudalib --cuda-feature=+ptx61 --offload-arch=sm_70 %s 2>&1 | FileCheck -check-prefix MANUAL-FEATURE %s // MANUAL-FEATURE: -cc1{{.*}}-target-feature{{.*}}+ptx61 diff --git a/clang/test/Driver/cuda-phases.cu b/clang/test/Driver/cuda-phases.cu index dac63942ddb49578e44b1f18499352623d28859b..404db69dff7b062bf5982494ba914b5da9ed5b1f 100644 --- a/clang/test/Driver/cuda-phases.cu +++ b/clang/test/Driver/cuda-phases.cu @@ -232,20 +232,14 @@ // NEW_DRIVER: 6: backend, {5}, assembler, (device-cuda, sm_52) // NEW_DRIVER: 7: assembler, {6}, object, (device-cuda, sm_52) // NEW_DRIVER: 8: offload, "device-cuda (nvptx64-nvidia-cuda:sm_52)" {7}, object -// NEW_DRIVER: 9: offload, "device-cuda (nvptx64-nvidia-cuda:sm_52)" {6}, assembler -// NEW_DRIVER: 10: linker, {8, 9}, cuda-fatbin, (device-cuda, sm_52) -// NEW_DRIVER: 11: offload, "device-cuda (nvptx64-nvidia-cuda:sm_52)" {10}, cuda-fatbin -// NEW_DRIVER: 12: input, 
"[[INPUT]]", cuda, (device-cuda, sm_70) -// NEW_DRIVER: 13: preprocessor, {12}, cuda-cpp-output, (device-cuda, sm_70) -// NEW_DRIVER: 14: compiler, {13}, ir, (device-cuda, sm_70) -// NEW_DRIVER: 15: backend, {14}, assembler, (device-cuda, sm_70) -// NEW_DRIVER: 16: assembler, {15}, object, (device-cuda, sm_70) -// NEW_DRIVER: 17: offload, "device-cuda (nvptx64-nvidia-cuda:sm_70)" {16}, object -// NEW_DRIVER: 18: offload, "device-cuda (nvptx64-nvidia-cuda:sm_70)" {15}, assembler -// NEW_DRIVER: 19: linker, {17, 18}, cuda-fatbin, (device-cuda, sm_70) -// NEW_DRIVER: 20: offload, "device-cuda (nvptx64-nvidia-cuda:sm_70)" {19}, cuda-fatbin -// NEW_DRIVER: 21: clang-offload-packager, {11, 20}, image -// NEW_DRIVER: 22: offload, " (powerpc64le-ibm-linux-gnu)" {2}, " (powerpc64le-ibm-linux-gnu)" {21}, ir -// NEW_DRIVER: 23: backend, {22}, assembler, (host-cuda) -// NEW_DRIVER: 24: assembler, {23}, object, (host-cuda) -// NEW_DRIVER: 25: clang-linker-wrapper, {24}, image, (host-cuda) +// NEW_DRIVER: 9: input, "[[INPUT]]", cuda, (device-cuda, sm_70) +// NEW_DRIVER: 10: preprocessor, {9}, cuda-cpp-output, (device-cuda, sm_70) +// NEW_DRIVER: 11: compiler, {10}, ir, (device-cuda, sm_70) +// NEW_DRIVER: 12: backend, {11}, assembler, (device-cuda, sm_70) +// NEW_DRIVER: 13: assembler, {12}, object, (device-cuda, sm_70) +// NEW_DRIVER: 14: offload, "device-cuda (nvptx64-nvidia-cuda:sm_70)" {13}, object +// NEW_DRIVER: 15: clang-offload-packager, {8, 14}, image +// NEW_DRIVER: 16: offload, " (powerpc64le-ibm-linux-gnu)" {2}, " (powerpc64le-ibm-linux-gnu)" {15}, ir +// NEW_DRIVER: 17: backend, {16}, assembler, (host-cuda) +// NEW_DRIVER: 18: assembler, {17}, object, (host-cuda) +// NEW_DRIVER: 19: clang-linker-wrapper, {18}, image, (host-cuda) diff --git a/clang/test/Driver/hip-options.hip b/clang/test/Driver/hip-options.hip index c4f436669b0b79b6ed644e3c8e95a68cd77cf029..2d6ed77cf4d7fe1dbae3fb2a7f135182c12ce8a4 100644 --- a/clang/test/Driver/hip-options.hip +++ b/clang/test/Driver/hip-options.hip @@ -116,3 +116,13 @@ // RUN: --cuda-gpu-arch=gfx906 -Xoffload-linker --build-id=md5 %s 2>&1 \ // RUN: | FileCheck -check-prefix=OFL-LINK %s // OFL-LINK: lld{{.*}}"--build-id=md5" + +// RUN: %clang -### --target=x86_64-unknown-linux-gnu -nogpuinc -nogpulib \ +// RUN: --offload-arch=gfx906 -fhip-kernel-arg-name %s 2>&1 \ +// RUN: | FileCheck -check-prefix=KAN %s +// KAN: "-cc1"{{.*}} "-triple" "amdgcn-amd-amdhsa" {{.*}} "-fhip-kernel-arg-name" + +// RUN: %clang -### --target=x86_64-unknown-linux-gnu -nogpuinc -nogpulib \ +// RUN: --offload-arch=gfx906 %s 2>&1 \ +// RUN: | FileCheck -check-prefix=KANNEG %s +// KANNEG-NOT: "-fhip-kernel-arg-name" diff --git a/clang/test/Driver/instrprof-ld.c b/clang/test/Driver/instrprof-ld.c index d1b070ed59c3f468c7b2af20f0ceb25bf1231811..fefc648d610e2c2213191e5f1b337e502423d281 100644 --- a/clang/test/Driver/instrprof-ld.c +++ b/clang/test/Driver/instrprof-ld.c @@ -36,6 +36,15 @@ // CHECK-FREEBSD-X86-64: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}" // CHECK-FREEBSD-X86-64: "{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}freebsd{{/|\\\\}}libclang_rt.profile-x86_64.a" // +// RUN: %clang -### %s 2>&1 \ +// RUN: --target=x86_64-unknown-openbsd -fprofile-instr-generate -fuse-ld=ld \ +// RUN: -resource-dir=%S/Inputs/resource_dir \ +// RUN: --sysroot=%S/Inputs/basic_openbsd_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENBSD-X86-64 %s + +// CHECK-OPENBSD-X86-64: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}" +// CHECK-OPENBSD-X86-64: 
"{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}openbsd{{/|\\\\}}libclang_rt.profile-x86_64.a" + // RUN: %clang -### %s 2>&1 \ // RUN: -shared \ // RUN: --target=i386-unknown-linux -fprofile-instr-generate -fuse-ld=ld \ @@ -66,6 +75,16 @@ // CHECK-FREEBSD-X86-64-SHARED: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}" // CHECK-FREEBSD-X86-64-SHARED: "{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}freebsd{{/|\\\\}}libclang_rt.profile-x86_64.a" // +// RUN: %clang -### %s 2>&1 \ +// RUN: -shared \ +// RUN: --target=x86_64-unknown-openbsd -fprofile-instr-generate -fuse-ld=ld \ +// RUN: -resource-dir=%S/Inputs/resource_dir \ +// RUN: --sysroot=%S/Inputs/basic_openbsd_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENBSD-X86-64-SHARED %s + +// CHECK-OPENBSD-X86-64-SHARED: "{{(.*[^-.0-9A-Z_a-z])?}}ld{{(.exe)?}}" +// CHECK-OPENBSD-X86-64-SHARED: "{{.*}}/Inputs/resource_dir{{/|\\\\}}lib{{/|\\\\}}openbsd{{/|\\\\}}libclang_rt.profile-x86_64.a" + // RUN: %clang -### %s 2>&1 \ // RUN: --target=x86_64-apple-darwin14 -fprofile-instr-generate -fuse-ld=ld \ // RUN: -resource-dir=%S/Inputs/resource_dir \ diff --git a/clang/test/Driver/minix.c b/clang/test/Driver/minix.c new file mode 100644 index 0000000000000000000000000000000000000000..54f261d53809d10ec791f785e1467a2ba18ea601 --- /dev/null +++ b/clang/test/Driver/minix.c @@ -0,0 +1,6 @@ +// -r suppresses default -l and crt*.o like -nostdlib. +// RUN: %clang -### %s --target=i386-unknown-minix -r 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-RELOCATABLE +// CHECK-RELOCATABLE: "-r" +// CHECK-RELOCATABLE-NOT: "-l +// CHECK-RELOCATABLE-NOT: /crt{{[^.]+}}.o diff --git a/clang/test/Driver/openmp-offload-gpu-new.c b/clang/test/Driver/openmp-offload-gpu-new.c index 4c2748515cf0df4f4fe24864d664d8a2ca423cf1..a59952a90e29efb0b2f6a83be555176d1690024c 100644 --- a/clang/test/Driver/openmp-offload-gpu-new.c +++ b/clang/test/Driver/openmp-offload-gpu-new.c @@ -115,3 +115,8 @@ // RUN: %s 2>&1 | FileCheck --check-prefix=CHECK-XLINKER %s // CHECK-XLINKER: -device-linker=a{{.*}}-device-linker=nvptx64-nvidia-cuda=b{{.*}}-device-linker=nvptx64-nvidia-cuda=c{{.*}}-- + +// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp --offload-arch=sm_52 -nogpulib \ +// RUN: -foffload-lto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-FEATURES %s + +// CHECK-LTO-FEATURES: clang-offload-packager{{.*}}--image={{.*}}feature=+ptx{{[0-9]+}} diff --git a/clang/test/FixIt/fixit-unicode-named-escape-sequences.c b/clang/test/FixIt/fixit-unicode-named-escape-sequences.c new file mode 100644 index 0000000000000000000000000000000000000000..1f956427b1840c7cd21b7b2993034b105e32d029 --- /dev/null +++ b/clang/test/FixIt/fixit-unicode-named-escape-sequences.c @@ -0,0 +1,29 @@ +// RUN: not %clang_cc1 -fsyntax-only -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck -check-prefix=CHECK-MACHINE %s +const char* +\N{GREEK_SMALL_LETTER-OMICRON} = // expected-error {{'GREEK_SMALL_LETTER-OMICRON' is not a valid Unicode character name}} \ + // expected-note {{sensitive to case and whitespaces}} +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-2]]:4-[[@LINE-2]]:30}:"GREEK SMALL LETTER OMICRON" + +"\N{zero width no break space}" // expected-error {{'zero width no break space' is not a valid Unicode character name}} \ + // expected-note {{sensitive to case and whitespaces}} +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-2]]:5-[[@LINE-2]]:30}:"ZERO WIDTH NO-BREAK SPACE" + +"abc\N{MAN IN A BUSINESS SUIT LEVITATING}" // expected-error {{'MAN IN A BUSINESS SUIT LEVITATING' is not a valid Unicode character name}} \ + // 
expected-note {{did you mean MAN IN BUSINESS SUIT LEVITATING ('🕴' U+1F574)?}} +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-2]]:8-[[@LINE-2]]:41}:"MAN IN BUSINESS SUIT LEVITATING" + +"\N{AAA}" // expected-error {{'AAA' is not a valid Unicode character name}} \ + // expected-note 5{{did you mean}} +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-2]]:5-[[@LINE-2]]:8}:"ANT" +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-3]]:5-[[@LINE-3]]:8}:"ARC" +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-4]]:5-[[@LINE-4]]:8}:"AXE" +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-5]]:5-[[@LINE-5]]:8}:"BAT" +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-6]]:5-[[@LINE-6]]:8}:"CAT" + +"\N{BLACKCHESSBISHOP}" // expected-error {{'BLACKCHESSBISHOP' is not a valid Unicode character name}} \ + // expected-note {{sensitive to case and whitespaces}} +// CHECK-MACHINE: fix-it:"{{.*}}":{[[@LINE-2]]:5-[[@LINE-2]]:21}:"BLACK CHESS BISHOP" + +; + + diff --git a/clang/test/Interpreter/code-undo.cpp b/clang/test/Interpreter/code-undo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d825460f3b4a42a0a6eaf990a53ce3fa70707ddb --- /dev/null +++ b/clang/test/Interpreter/code-undo.cpp @@ -0,0 +1,23 @@ +// RUN: clang-repl "int i = 10;" 'extern "C" int printf(const char*,...);' \ +// RUN: 'auto r1 = printf("i = %d\n", i);' | FileCheck --check-prefix=CHECK-DRIVER %s +// REQUIRES: host-supports-jit +// UNSUPPORTED: system-aix +// CHECK-DRIVER: i = 10 +// RUN: cat %s | clang-repl | FileCheck %s +extern "C" int printf(const char *, ...); +int x1 = 0; +int x2 = 42; +%undo +int x2 = 24; +auto r1 = printf("x1 = %d\n", x1); +// CHECK: x1 = 0 +auto r2 = printf("x2 = %d\n", x2); +// CHECK-NEXT: x2 = 24 + +int foo() { return 1; } +%undo +int foo() { return 2; } +auto r3 = printf("foo() = %d\n", foo()); +// CHECK-NEXT: foo() = 2 + +%quit diff --git a/clang/test/Interpreter/execute.cpp b/clang/test/Interpreter/execute.cpp index 61e68990acf96132daf141b3c806bbbfa25dac29..f5c70c21ac507584b3023ea5c02c6f35166800f3 100644 --- a/clang/test/Interpreter/execute.cpp +++ b/clang/test/Interpreter/execute.cpp @@ -1,3 +1,4 @@ +// RUN: clang-repl "int x = 10;" "int y=7; err;" "int y = 10;" // RUN: clang-repl "int i = 10;" 'extern "C" int printf(const char*,...);' \ // RUN: 'auto r1 = printf("i = %d\n", i);' | FileCheck --check-prefix=CHECK-DRIVER %s // REQUIRES: host-supports-jit @@ -17,4 +18,4 @@ auto r2 = printf("S[f=%f, m=0x%llx]\n", s.f, reinterpret_cast' == '$'); // expected-warning 2{{trigraph converted}} \ + // expected-warning {{named escape sequences are a Clang extension}} +#endif diff --git a/clang/test/Lexer/unicode.c b/clang/test/Lexer/unicode.c index b0cc28cfb915a354ba10450ffe073976b08016a3..efbd63fb00630d14ac512a6a8dca009d13e274c9 100644 --- a/clang/test/Lexer/unicode.c +++ b/clang/test/Lexer/unicode.c @@ -39,9 +39,14 @@ extern int 𐠈; extern int ꙮ; extern int \u1B4C; // BALINESE LETTER ARCHAIC JNYA - Added in Unicode 14 extern int \U00016AA2; // TANGSA LETTER GA - Added in Unicode 14 +extern int _\N{TANGSA LETTER GA}; +extern int _\N{TANGSALETTERGA}; // expected-error {{'TANGSALETTERGA' is not a valid Unicode character name}} \ + // expected-note {{characters names in Unicode escape sequences are sensitive to case and whitespace}} + + + // This character doesn't have the XID_Start property extern int \U00016AC0; // TANGSA DIGIT ZERO // expected-error {{expected unqualified-id}} -extern int _\U00016AC0; // TANGSA DIGIT ZERO extern int 🌹; // expected-error {{unexpected character }} \ expected-warning {{declaration does not declare anything}} 
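The unicode.c hunk above, together with the fix-it test earlier in this patch, pins down the contract for \N{...} escapes: the name must match the Unicode character name exactly, including case, spacing, and hyphens, or Clang rejects it and, where it can, offers a fix-it or a "did you mean" note. The sketch below is a standalone illustration of the accepted spellings, not part of this patch; the file name is hypothetical, and it assumes a Clang new enough to lex named escape sequences (older language modes accept them with the "named escape sequences are a Clang extension" warning exercised above).

    /* named-escapes-demo.c -- hypothetical file, for illustration only */
    #include <stdio.h>

    int main(void) {
      /* Exact name of U+03BF; "GREEK_SMALL_LETTER-OMICRON" is rejected. */
      const char *omicron = "\N{GREEK SMALL LETTER OMICRON}";
      /* U+FEFF; note the hyphen in "NO-BREAK", as the fix-it suggests. */
      const char *bom = "\N{ZERO WIDTH NO-BREAK SPACE}";
      printf("%s%s\n", bom, omicron);
      return 0;
    }

Misspelling either name triggers the kind of diagnostics these tests check for.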
diff --git a/clang/test/OpenMP/masked_taskloop_ast_print.cpp b/clang/test/OpenMP/masked_taskloop_ast_print.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b33b0f6c6ab6729464f040ec9f1614a285f3d526 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_ast_print.cpp @@ -0,0 +1,95 @@ +// RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -std=c++11 -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s + +// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s +// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -std=c++11 -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +void foo() {} + +template +T tmain(T argc) { + T b = argc, c, d, e, f, g; + int tid = 0; + static T a; +// CHECK: static T a; +#pragma omp taskgroup allocate(d) task_reduction(+: d) +#pragma omp masked taskloop if(taskloop: argc > N) default(shared) untied priority(N) grainsize(N) reduction(+:g) in_reduction(+: d) filter(tid) allocate(d) + // CHECK-NEXT: #pragma omp taskgroup allocate(d) task_reduction(+: d) + // CHECK-NEXT: #pragma omp masked taskloop if(taskloop: argc > N) default(shared) untied priority(N) grainsize(N) reduction(+: g) in_reduction(+: d) filter(tid) allocate(d){{$}} + for (int i = 0; i < 2; ++i) + a = 2; +// CHECK-NEXT: for (int i = 0; i < 2; ++i) +// CHECK-NEXT: a = 2; +#pragma omp parallel +#pragma omp masked taskloop private(argc, b), firstprivate(c, d), lastprivate(d, f) collapse(N) shared(g) if (c) final(d) mergeable priority(f) nogroup num_tasks(N) + for (int i = 0; i < 2; ++i) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) + for (int i = 0; i < 2; ++i) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) + for (int j = 0; j < 2; ++j) { +#pragma omp cancel taskgroup +#pragma omp cancellation point taskgroup + foo(); + } + // CHECK-NEXT: #pragma omp parallel + // CHECK-NEXT: #pragma omp masked taskloop private(argc,b) firstprivate(c,d) lastprivate(d,f) collapse(N) shared(g) if(c) final(d) mergeable priority(f) nogroup num_tasks(N) + // CHECK-NEXT: for (int i = 0; i < 2; ++i) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int i = 0; i < 2; ++i) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) + // CHECK-NEXT: for (int j = 0; j < 2; ++j) { + // CHECK-NEXT: #pragma omp cancel taskgroup + // CHECK-NEXT: #pragma omp cancellation point taskgroup + // CHECK-NEXT: foo(); + return T(); +} + +// CHECK-LABEL: int main(int argc, char **argv) { +int main(int argc, char **argv) { + int b = argc, c, d, e, f, g; + int tid = 0; + static int a; +// CHECK: static int a; +#pragma omp taskgroup task_reduction(+: d) +#pragma omp masked taskloop if(taskloop: a) default(none) shared(a) final(b) priority(5) num_tasks(argc) reduction(*: g) in_reduction(+:d) + // CHECK-NEXT: #pragma omp taskgroup task_reduction(+: d) + // CHECK-NEXT: #pragma omp masked taskloop if(taskloop: a) default(none) shared(a) final(b) priority(5) num_tasks(argc) reduction(*: g) in_reduction(+: d) + for (int i = 0; i < 2; 
++i) + a = 2; +// CHECK-NEXT: for (int i = 0; i < 2; ++i) +// CHECK-NEXT: a = 2; +#pragma omp parallel +#pragma omp masked taskloop private(argc, b), firstprivate(argv, c), lastprivate(d, f) collapse(2) shared(g) if(argc) mergeable priority(argc) grainsize(argc) reduction(max: a, e) filter(tid) + for (int i = 0; i < 10; ++i) + for (int j = 0; j < 10; ++j) { +#pragma omp cancel taskgroup +#pragma omp cancellation point taskgroup + foo(); + } + // CHECK-NEXT: #pragma omp parallel + // CHECK-NEXT: #pragma omp masked taskloop private(argc,b) firstprivate(argv,c) lastprivate(d,f) collapse(2) shared(g) if(argc) mergeable priority(argc) grainsize(argc) reduction(max: a,e) filter(tid) + // CHECK-NEXT: for (int i = 0; i < 10; ++i) + // CHECK-NEXT: for (int j = 0; j < 10; ++j) { + // CHECK-NEXT: #pragma omp cancel taskgroup + // CHECK-NEXT: #pragma omp cancellation point taskgroup + // CHECK-NEXT: foo(); + return (tmain(argc) + tmain(argv[0][0])); +} + +#endif diff --git a/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp b/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c15d4fd768827e7b92ea39a123ffc4527bf26ed --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp @@ -0,0 +1,99 @@ +// RUN: %clang_cc1 -verify -fopenmp %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp -std=c++98 %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp -std=c++11 %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp-simd -std=c++98 %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp-simd -std=c++11 %s -Wuninitialized + +// expected-note@* 0+{{declared here}} + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; + +template +T tmain(T argc, S **argv) { + #pragma omp masked taskloop collapse // expected-error {{expected '(' after 'collapse'}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse () // expected-error {{expected expression}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} + // expected-error@+1 2 {{integral constant expression}} expected-note@+1 0+{{constant expression}} + #pragma omp masked taskloop collapse (argc + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + // expected-error@+1 2 {{argument to 'collapse' clause must be a strictly positive integer value}} + #pragma omp masked taskloop collapse (ST // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse (1)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse ((ST > 0) ? 
1 + ST : 2) // expected-note 2 {{as specified in 'collapse' clause}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; // expected-error 2 {{expected 2 for loops after '#pragma omp masked taskloop', but found only 1}} + // expected-error@+3 2 {{directive '#pragma omp masked taskloop' cannot contain more than one 'collapse' clause}} + // expected-error@+2 {{argument to 'collapse' clause must be a strictly positive integer value}} + // expected-error@+1 2 {{integral constant expression}} expected-note@+1 0+{{constant expression}} + #pragma omp masked taskloop collapse (foobool(argc)), collapse (true), collapse (-5) + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse (S) // expected-error {{'S' does not refer to a value}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; +#if __cplusplus <= 199711L + // expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}} +#else + // expected-error@+2 2 {{integral constant expression must have integral or unscoped enumeration type, not 'char *'}} +#endif + #pragma omp masked taskloop collapse (argv[1]=2) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse (1) + for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse (N) // expected-error {{argument to 'collapse' clause must be a strictly positive integer value}} + for (T i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST]; + #pragma omp masked taskloop collapse (2) // expected-note {{as specified in 'collapse' clause}} + foo(); // expected-error {{expected 2 for loops after '#pragma omp masked taskloop'}} + return argc; +} + +int main(int argc, char **argv) { + #pragma omp masked taskloop collapse // expected-error {{expected '(' after 'collapse'}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + #pragma omp masked taskloop collapse ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + #pragma omp masked taskloop collapse () // expected-error {{expected expression}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + #pragma omp masked taskloop collapse (4 // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-note {{as specified in 'collapse' clause}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; // expected-error {{expected 4 for loops after '#pragma omp masked taskloop', but found only 1}} + #pragma omp masked taskloop collapse (2+2)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} expected-note {{as specified in 'collapse' clause}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; // expected-error {{expected 4 for loops after '#pragma omp masked taskloop', but found only 1}} + // expected-error@+1 {{integral constant expression}} expected-note@+1 0+{{constant expression}} + #pragma omp masked taskloop collapse (foobool(1) > 0 ? 
1 : 2) + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + // expected-error@+3 {{integral constant expression}} expected-note@+3 0+{{constant expression}} + // expected-error@+2 2 {{directive '#pragma omp masked taskloop' cannot contain more than one 'collapse' clause}} + // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} + #pragma omp masked taskloop collapse (foobool(argc)), collapse (true), collapse (-5) + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + #pragma omp masked taskloop collapse (S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; +#if __cplusplus <= 199711L + // expected-error@+4 {{integral constant expression}} expected-note@+4 0+{{constant expression}} +#else + // expected-error@+2 {{integral constant expression must have integral or unscoped enumeration type, not 'char *'}} +#endif + #pragma omp masked taskloop collapse (argv[1]=2) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 4; i < 12; i++) argv[0][i] = argv[0][i] - argv[0][i-4]; + // expected-error@+3 {{statement after '#pragma omp masked taskloop' must be a for loop}} + // expected-note@+1 {{in instantiation of function template specialization 'tmain' requested here}} + #pragma omp masked taskloop collapse(collapse(tmain(argc, argv) // expected-error 2 {{expected ')'}} expected-note 2 {{to match this '('}} + foo(); + #pragma omp masked taskloop collapse (2) // expected-note {{as specified in 'collapse' clause}} + foo(); // expected-error {{expected 2 for loops after '#pragma omp masked taskloop'}} + // expected-note@+1 {{in instantiation of function template specialization 'tmain' requested here}} + return tmain(argc, argv); +} + diff --git a/clang/test/OpenMP/masked_taskloop_final_messages.cpp b/clang/test/OpenMP/masked_taskloop_final_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eaca88255b63de48b6b4ff3e3335e4a6656464f8 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_final_messages.cpp @@ -0,0 +1,94 @@ +// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; // expected-note {{declared here}} + +template // expected-note {{declared here}} +int tmain(T argc, S **argv) { + T z; +#pragma omp masked taskloop final // expected-error {{expected '(' after 'final'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final() // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc > 0 ? 
argv[1] : argv[2] + z) + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(foobool(argc)), final(true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'final' clause}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(S) // expected-error {{'S' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argv[1] = 2) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc) + for (int i = 0; i < 10; ++i) + foo(); + + return 0; +} + +int main(int argc, char **argv) { + int z; +#pragma omp masked taskloop final // expected-error {{expected '(' after 'final'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final() // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc > 0 ? argv[1] : argv[2] - z) + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(foobool(argc)), final(true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'final' clause}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argv[1] = 2) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(1 0) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop final(if (tmain(argc, argv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + + return tmain(argc, argv); +} diff --git a/clang/test/OpenMP/masked_taskloop_firstprivate_messages.cpp b/clang/test/OpenMP/masked_taskloop_firstprivate_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fe486ffe85075ca677259a86aca448cfe6a3db9a --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_firstprivate_messages.cpp @@ -0,0 +1,336 @@ +// RUN: %clang_cc1 -verify -fopenmp %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd %s -Wuninitialized + +typedef void **omp_allocator_handle_t; +extern const omp_allocator_handle_t omp_null_allocator; +extern const omp_allocator_handle_t omp_default_mem_alloc; +extern const omp_allocator_handle_t omp_large_cap_mem_alloc; +extern const omp_allocator_handle_t omp_const_mem_alloc; +extern const omp_allocator_handle_t 
omp_high_bw_mem_alloc; +extern const omp_allocator_handle_t omp_low_lat_mem_alloc; +extern const omp_allocator_handle_t omp_cgroup_mem_alloc; +extern const omp_allocator_handle_t omp_pteam_mem_alloc; +extern const omp_allocator_handle_t omp_thread_mem_alloc; + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +void xxx(int argc) { + int fp; // expected-note {{initialize the variable 'fp' to silence this warning}} +#pragma omp masked taskloop firstprivate(fp) // expected-warning {{variable 'fp' is uninitialized when used here}} + for (int i = 0; i < 10; ++i) + ; +} + +struct S1; // expected-note 2 {{declared here}} expected-note 2 {{forward declaration of 'S1'}} +extern S1 a; +class S2 { + mutable int a; + +public: + S2() : a(0) {} + S2(const S2 &s2) : a(s2.a) {} + static float S2s; + static const float S2sc; +}; +const float S2::S2sc = 0; +const S2 b; +const S2 ba[5]; +class S3 { + int a; + S3 &operator=(const S3 &s3); + +public: + S3() : a(0) {} // expected-note 2 {{candidate constructor not viable: requires 0 arguments, but 1 was provided}} + S3(S3 &s3) : a(s3.a) {} // expected-note 2 {{candidate constructor not viable: 1st argument ('const S3') would lose const qualifier}} +}; +const S3 c; +const S3 ca[5]; +extern const int f; +class S4 { + int a; + S4(); + S4(const S4 &s4); // expected-note 2 {{implicitly declared private here}} + +public: + S4(int v) : a(v) {} +}; +class S5 { + int a; + S5(const S5 &s5) : a(s5.a) {} // expected-note 4 {{implicitly declared private here}} + +public: + S5() : a(0) {} + S5(int v) : a(v) {} +}; +class S6 { + int a; + S6() : a(0) {} + +public: + S6(const S6 &s6) : a(s6.a) {} + S6(int v) : a(v) {} +}; + +S3 h; +#pragma omp threadprivate(h) // expected-note 2 {{defined as threadprivate or thread local}} + +template +int foomain(int argc, char **argv) { + I e(4); + C g(5); + int i, z; + int &j = i; +#pragma omp parallel +#pragma omp masked taskloop firstprivate // expected-error {{expected '(' after 'firstprivate'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate() // expected-error {{expected expression}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc > 0 ? 
argv[1] : argv[2]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop allocate(omp_thread_mem_alloc: argc) firstprivate(argc) // expected-warning {{allocator with the 'thread' trait access has unspecified behavior on 'masked taskloop' directive}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(S1) // expected-error {{'S1' does not refer to a value}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(a, b) // expected-error {{firstprivate variable with incomplete type 'S1'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argv[1]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(z, e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(h) // expected-error {{threadprivate or thread local variable cannot be firstprivate}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel + { + int v = 0; + int i; +#pragma omp masked taskloop firstprivate(i) + for (int k = 0; k < argc; ++k) { + i = k; + v += i; + } + } +#pragma omp parallel shared(i) +#pragma omp parallel private(i) +#pragma omp masked taskloop firstprivate(j) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(i) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(g) firstprivate(g) // expected-error {{calling a private constructor of class 'S5'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel private(i) +#pragma omp masked taskloop firstprivate(i) // expected-note 2 {{defined as firstprivate}} + for (i = 0; i < argc; ++i) // expected-error 2 {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} + foo(); +#pragma omp parallel reduction(+ : i) // expected-note {{defined as reduction}} +#pragma omp masked taskloop firstprivate(i) // expected-note {{defined as firstprivate}} expected-error {{argument of a reduction clause of a parallel construct must not appear in a firstprivate clause on a task construct}} + for (i = 0; i < argc; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} + foo(); + return 0; +} + +void bar(S4 a[2]) { +#pragma omp parallel +#pragma omp masked taskloop firstprivate(a) + for (int i = 0; i < 2; ++i) + foo(); +} + +namespace A { +double x; +#pragma omp threadprivate(x) // expected-note {{defined as threadprivate or thread local}} +} +namespace B { +using A::x; +} + +int main(int argc, char **argv) { + const int d = 5; + const int da[5] = {0}; + S4 e(4); + S5 g(5); + S3 m; + S6 n(2); + int i; + int &j = i; +#pragma omp parallel +#pragma omp masked taskloop firstprivate // expected-error {{expected '(' after 'firstprivate'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp 
parallel +#pragma omp masked taskloop firstprivate() // expected-error {{expected expression}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argc) allocate , allocate(, allocate(omp_default , allocate(omp_default_mem_alloc, allocate(omp_default_mem_alloc:, allocate(omp_default_mem_alloc: argc, allocate(omp_default_mem_alloc: argv), allocate(argv) // expected-error {{expected '(' after 'allocate'}} expected-error 2 {{expected expression}} expected-error 2 {{expected ')'}} expected-error {{use of undeclared identifier 'omp_default'}} expected-note 2 {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(S1) // expected-error {{'S1' does not refer to a value}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-error {{no matching constructor for initialization of 'S3'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(argv[1]) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(2 * 2) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(ba) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(ca) // expected-error {{no matching constructor for initialization of 'S3'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(da) // OK + for (i = 0; i < argc; ++i) + foo(); + int xa; +#pragma omp parallel +#pragma omp masked taskloop firstprivate(xa) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(S2::S2s) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(S2::S2sc) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop safelen(5) // expected-error {{unexpected OpenMP clause 'safelen' in directive '#pragma omp masked taskloop'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(m) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(h) // expected-error {{threadprivate or thread local variable cannot be firstprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop private(xa), firstprivate(xa) // 
expected-error {{private variable cannot be firstprivate}} expected-note {{defined as private}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(i) // expected-note {{defined as firstprivate}} + for (i = 0; i < argc; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} + foo(); +#pragma omp parallel shared(xa) +#pragma omp masked taskloop firstprivate(xa) // OK: may be firstprivate + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(j) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(g) firstprivate(g) // expected-error {{calling a private constructor of class 'S5'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(n) firstprivate(n) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel + { + int v = 0; + int i; +#pragma omp masked taskloop firstprivate(i) + for (int k = 0; k < argc; ++k) { + i = k; + v += i; + } + } +#pragma omp parallel private(i) +#pragma omp masked taskloop firstprivate(i) // expected-note {{defined as firstprivate}} + for (i = 0; i < argc; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} + foo(); +#pragma omp parallel reduction(+ : i) // expected-note {{defined as reduction}} +#pragma omp masked taskloop firstprivate(i) //expected-error {{argument of a reduction clause of a parallel construct must not appear in a firstprivate clause on a task construct}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp masked taskloop firstprivate(i) //expected-note {{defined as firstprivate}} + for (i = 0; i < argc; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(B::x) // expected-error {{threadprivate or thread local variable cannot be firstprivate}} + for (i = 0; i < argc; ++i) + foo(); + static int si; +#pragma omp masked taskloop firstprivate(si) // OK + for (i = 0; i < argc; ++i) + si = i + 1; + + return foomain(argc, argv); // expected-note {{in instantiation of function template specialization 'foomain' requested here}} +} + diff --git a/clang/test/OpenMP/masked_taskloop_grainsize_messages.cpp b/clang/test/OpenMP/masked_taskloop_grainsize_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..636db374c5121b387dc6f7a249fdfc8f3a98b562 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_grainsize_messages.cpp @@ -0,0 +1,103 @@ +// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; // expected-note {{declared here}} + +template // expected-note {{declared here}} +int tmain(T argc, S **argv) { + T z; + #pragma omp masked taskloop grainsize // expected-error {{expected '(' after 'grainsize'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize () // 
expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc > 0 ? argv[1][0] : argv[2][argc] + z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (foobool(argc)), grainsize (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'grainsize' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (S) // expected-error {{'S' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(0) // expected-error {{argument to 'grainsize' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(-1) // expected-error {{argument to 'grainsize' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(argc) num_tasks(argc) // expected-error {{'num_tasks' and 'grainsize' clause are mutually exclusive and may not appear on the same directive}} expected-note {{'grainsize' clause is specified here}} + for (int i = 0; i < 10; ++i) + foo(); + + return 0; +} + +int main(int argc, char **argv) { + int z; + #pragma omp masked taskloop grainsize // expected-error {{expected '(' after 'grainsize'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize () // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc > 0 ? 
argv[1][0] : argv[2][argc] + z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (foobool(argc)), grainsize (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'grainsize' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize (1 0) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(if(tmain(argc, argv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(0) // expected-error {{argument to 'grainsize' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(-1) // expected-error {{argument to 'grainsize' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop grainsize(argc) num_tasks(argc) // expected-error {{'num_tasks' and 'grainsize' clause are mutually exclusive and may not appear on the same directive}} expected-note {{'grainsize' clause is specified here}} + for (int i = 0; i < 10; ++i) + foo(); + + return tmain(argc, argv); +} diff --git a/clang/test/OpenMP/masked_taskloop_in_reduction_messages.cpp b/clang/test/OpenMP/masked_taskloop_in_reduction_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e7740732264b1c20142c6bc16f07f4a080ef32dd --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_in_reduction_messages.cpp @@ -0,0 +1,393 @@ +// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 150 -o - %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp -std=c++98 -ferror-limit 150 -o - %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp -std=c++11 -ferror-limit 150 -o - %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 150 -o - %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp-simd -std=c++98 -ferror-limit 150 -o - %s -Wuninitialized +// RUN: %clang_cc1 -verify -fopenmp-simd -std=c++11 -ferror-limit 150 -o - %s -Wuninitialized + +typedef void **omp_allocator_handle_t; +extern const omp_allocator_handle_t omp_null_allocator; +extern const omp_allocator_handle_t omp_default_mem_alloc; +extern const omp_allocator_handle_t omp_large_cap_mem_alloc; +extern const omp_allocator_handle_t omp_const_mem_alloc; +extern const omp_allocator_handle_t omp_high_bw_mem_alloc; +extern const omp_allocator_handle_t omp_low_lat_mem_alloc; +extern const omp_allocator_handle_t omp_cgroup_mem_alloc; +extern const omp_allocator_handle_t omp_pteam_mem_alloc; +extern const omp_allocator_handle_t omp_thread_mem_alloc; + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +void foobar(int &ref) { + int tid = 0; +#pragma omp taskgroup task_reduction(+:ref) +#pragma omp masked taskloop filter(tid) in_reduction(+:ref) + for (int i = 0; i < 10; ++i) + foo(); +} + +void foobar1(int &ref) { +#pragma omp taskgroup task_reduction(+:ref) +#pragma omp masked taskloop in_reduction(-:ref) + for (int i = 0; i < 10; ++i) + foo(); +} + +#pragma omp declare reduction 
(red:int:omp_out += omp_in) + +void foobar2(int &ref) { +#pragma omp taskgroup task_reduction(+:ref) // expected-note {{previously marked as task_reduction with different reduction operation}} +#pragma omp masked taskloop in_reduction(red:ref) // expected-error{{in_reduction variable must have the same reduction operation as in a task_reduction clause}} + for (int i = 0; i < 10; ++i) + foo(); +} + +void foobar3(int &ref) { +#pragma omp taskgroup task_reduction(red:ref) // expected-note {{previously marked as task_reduction with different reduction operation}} +#pragma omp masked taskloop in_reduction(min:ref) // expected-error{{in_reduction variable must have the same reduction operation as in a task_reduction clause}} + for (int i = 0; i < 10; ++i) + foo(); +} + +void foobar4(int &ref) { + int tid = 0; +#pragma omp masked taskloop filter(tid) in_reduction(min:ref) + for (int i = 0; i < 10; ++i) + foo(); +} + +struct S1; // expected-note {{declared here}} expected-note 4 {{forward declaration of 'S1'}} +extern S1 a; +class S2 { + mutable int a; + S2 &operator+(const S2 &arg) { return (*this); } // expected-note 3 {{implicitly declared private here}} + +public: + S2() : a(0) {} + S2(S2 &s2) : a(s2.a) {} + static float S2s; // expected-note 2 {{static data member is predetermined as shared}} + static const float S2sc; // expected-note 2 {{'S2sc' declared here}} +}; +const float S2::S2sc = 0; +S2 b; // expected-note 3 {{'b' defined here}} +const S2 ba[5]; // expected-note 2 {{'ba' defined here}} +class S3 { + int a; + +public: + int b; + S3() : a(0) {} + S3(const S3 &s3) : a(s3.a) {} + S3 operator+(const S3 &arg1) { return arg1; } +}; +int operator+(const S3 &arg1, const S3 &arg2) { return 5; } +S3 c; // expected-note 3 {{'c' defined here}} +const S3 ca[5]; // expected-note 2 {{'ca' defined here}} +extern const int f; // expected-note 4 {{'f' declared here}} +class S4 { + int a; + S4(); // expected-note {{implicitly declared private here}} + S4(const S4 &s4); + S4 &operator+(const S4 &arg) { return (*this); } + +public: + S4(int v) : a(v) {} +}; +S4 &operator&=(S4 &arg1, S4 &arg2) { return arg1; } +class S5 { + int a; + S5() : a(0) {} // expected-note {{implicitly declared private here}} + S5(const S5 &s5) : a(s5.a) {} + S5 &operator+(const S5 &arg); + +public: + S5(int v) : a(v) {} +}; +class S6 { // expected-note 3 {{candidate function (the implicit copy assignment operator) not viable: no known conversion from 'int' to 'const S6' for 1st argument}} +#if __cplusplus >= 201103L // C++11 or later +// expected-note@-2 3 {{candidate function (the implicit move assignment operator) not viable}} +#endif + int a; + +public: + S6() : a(6) {} + operator int() { return 6; } +} o; + +S3 h, k; +#pragma omp threadprivate(h) // expected-note 2 {{defined as threadprivate or thread local}} + +template <class T> // expected-note {{declared here}} +T tmain(T argc) { + const T d = T(); // expected-note 4 {{'d' defined here}} + const T da[5] = {T()}; // expected-note 2 {{'da' defined here}} + T qa[5] = {T()}; + T i; + T &j = i; // expected-note 2 {{'j' defined here}} + S3 &p = k; // expected-note 2 {{'p' defined here}} + const T &r = da[(int)i]; // expected-note 2 {{'r' defined here}} + T &q = qa[(int)i]; + T fl; +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction // expected-error {{expected '(' after 'in_reduction'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction + // expected-error {{expected '(' after 
'in_reduction'}} expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction( // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction(- // expected-warning {{missing ':' after reduction identifier - ignoring}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction() // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction(*) // expected-warning {{missing ':' after reduction identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:argc) +#pragma omp masked taskloop in_reduction(\) // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(&:argc) // expected-error {{invalid operands to binary expression ('float' and 'float')}} +#pragma omp masked taskloop in_reduction(& : argc // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{invalid operands to binary expression ('float' and 'float')}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(|:argc) // expected-error {{invalid operands to binary expression ('float' and 'float')}} +#pragma omp masked taskloop in_reduction(| : argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{invalid operands to binary expression ('float' and 'float')}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(|| : argc ? 
i : argc) // expected-error 2 {{expected variable name, array element or array section}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(foo : argc) //expected-error {{incorrect reduction identifier, expected one of '+', '-', '*', '&', '|', '^', '&&', '||', 'min' or 'max' or declare reduction for type 'float'}} expected-error {{incorrect reduction identifier, expected one of '+', '-', '*', '&', '|', '^', '&&', '||', 'min' or 'max' or declare reduction for type 'int'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(&&:argc) +#pragma omp masked taskloop in_reduction(&& : argc) allocate , allocate(, allocate(omp_default , allocate(omp_default_mem_alloc, allocate(omp_default_mem_alloc:, allocate(omp_default_mem_alloc: argc, allocate(omp_default_mem_alloc: argv), allocate(argv) // expected-error {{expected '(' after 'allocate'}} expected-error 2 {{expected expression}} expected-error 2 {{expected ')'}} expected-error {{use of undeclared identifier 'omp_default'}} expected-note 2 {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(^ : T) // expected-error {{'T' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:c) +#pragma omp masked taskloop in_reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 3 {{const-qualified variable cannot be in_reduction}} expected-error 2 {{'operator+' is a private member of 'S2'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 4 {{arguments of OpenMP clause 'in_reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 3 {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(max : h.b) // expected-error {{expected variable name, array element or array section}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : ba) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(* : ca) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(- : da) // expected-error {{const-qualified variable cannot be in_reduction}} expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(^ : fl) // expected-error {{invalid operands to binary expression ('float' and 'float')}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(&& : S2::S2s) // expected-error {{shared variable cannot be reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(&& : S2::S2sc) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:k) +#pragma omp masked taskloop in_reduction(+ : h, k) // expected-error {{threadprivate or thread local variable cannot be reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : o) // expected-error 2 {{no viable overloaded '='}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp parallel private(k) 
+#pragma omp masked taskloop in_reduction(+ : p), in_reduction(+ : p) // expected-error 2 {{argument of OpenMP clause 'in_reduction' must reference the same object in all threads}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:p) +#pragma omp masked taskloop in_reduction(+ : p), in_reduction(+ : p) // expected-error 2 {{variable can appear only once in OpenMP 'in_reduction' clause}} expected-note 2 {{previously referenced here}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : r) // expected-error 2 {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp parallel shared(i) +#pragma omp parallel reduction(min : i) +#pragma omp masked taskloop in_reduction(max : j) // expected-error 2 {{argument of OpenMP clause 'in_reduction' must reference the same object in all threads}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:fl) +{ +#pragma omp masked taskloop in_reduction(+ : fl) allocate(omp_thread_mem_alloc: fl) // expected-warning 2 {{allocator with the 'thread' trait access has unspecified behavior on 'masked taskloop' directive}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(*:fl) // expected-note 2 {{previously marked as task_reduction with different reduction operation}} +{ +#pragma omp masked taskloop in_reduction(+ : fl) // expected-error 2 {{in_reduction variable must have the same reduction operation as in a task_reduction clause}} + for (int i = 0; i < 10; ++i) + foo(); +} +} +#pragma omp parallel +#pragma omp for reduction(- : fl) + for (int i = 0; i < 10; ++i) +#pragma omp taskgroup task_reduction(+:fl) +#pragma omp masked taskloop in_reduction(+ : fl) + for (int j = 0; j < 10; ++j) + foo(); + + return T(); +} + +namespace A { +double x; +#pragma omp threadprivate(x) // expected-note {{defined as threadprivate or thread local}} +} +namespace B { +using A::x; +} + +int main(int argc, char **argv) { + const int d = 5; // expected-note 2 {{'d' defined here}} + const int da[5] = {0}; // expected-note {{'da' defined here}} + int qa[5] = {0}; + S4 e(4); + S5 g(5); + int i; + int &j = i; // expected-note {{'j' defined here}} + S3 &p = k; // expected-note 2 {{'p' defined here}} + const int &r = da[i]; // expected-note {{'r' defined here}} + int &q = qa[i]; + float fl; +#pragma omp masked taskloop in_reduction // expected-error {{expected '(' after 'in_reduction'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction + // expected-error {{expected '(' after 'in_reduction'}} expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction( // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(- // expected-warning {{missing ':' after reduction identifier - ignoring}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction() // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(*) // expected-warning {{missing ':' after reduction 
identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(\) // expected-error {{expected unqualified-id}} expected-warning {{missing ':' after reduction identifier - ignoring}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(foo : argc // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{incorrect reduction identifier, expected one of '+', '-', '*', '&', '|', '^', '&&', '||', 'min' or 'max'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(|:argc) +#pragma omp masked taskloop in_reduction(| : argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(|| : argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name, array element or array section}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(~ : argc) // expected-error {{expected unqualified-id}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(&&:argc) +#pragma omp masked taskloop in_reduction(&& : argc) + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(^ : S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:c) +#pragma omp masked taskloop in_reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{const-qualified variable cannot be in_reduction}} expected-error {{'operator+' is a private member of 'S2'}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{arguments of OpenMP clause 'in_reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 2 {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(max : h.b) // expected-error {{expected variable name, array element or array section}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : ba) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(* : ca) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(- : da) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(^ : fl) // expected-error {{invalid operands to binary expression ('float' and 'float')}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(&& : S2::S2s) // expected-error {{shared variable cannot be reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(&& : S2::S2sc) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(& : e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-error {{invalid operands to binary expression ('S5' and 'S5')}} + for (int i = 0; i < 10; ++i) + foo(); 
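// For contrast with the negative cases above, a minimal well-formed pairing
// (an illustrative sketch, not part of the tested change; 'ok' is a name
// introduced only for this example): the 'in_reduction' operator must match
// the 'task_reduction' operator of the enclosing taskgroup for the same item:
//
//   int ok = 0;
//   #pragma omp taskgroup task_reduction(+ : ok)
//   #pragma omp masked taskloop in_reduction(+ : ok)
//   for (int i = 0; i < 10; ++i)
//     ok += i;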
+#pragma omp taskgroup task_reduction(+:k) +#pragma omp masked taskloop in_reduction(+ : h, k, B::x) // expected-error 2 {{threadprivate or thread local variable cannot be reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : o) // expected-error {{no viable overloaded '='}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp parallel private(k) +#pragma omp masked taskloop in_reduction(+ : p), in_reduction(+ : p) // expected-error 2 {{argument of OpenMP clause 'in_reduction' must reference the same object in all threads}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp taskgroup task_reduction(+:p) +#pragma omp masked taskloop in_reduction(+ : p), in_reduction(+ : p) // expected-error {{variable can appear only once in OpenMP 'in_reduction' clause}} expected-note {{previously referenced here}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp masked taskloop in_reduction(+ : r) // expected-error {{const-qualified variable cannot be in_reduction}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp parallel shared(i) +#pragma omp parallel reduction(min : i) +#pragma omp masked taskloop in_reduction(max : j) // expected-error {{argument of OpenMP clause 'in_reduction' must reference the same object in all threads}} + for (int i = 0; i < 10; ++i) + foo(); +#pragma omp parallel +#pragma omp for private(fl) + for (int i = 0; i < 10; ++i) +#pragma omp taskgroup task_reduction(+:fl) +#pragma omp masked taskloop in_reduction(+ : fl) + for (int j = 0; j < 10; ++j) + foo(); +#pragma omp taskgroup task_reduction(+:fl) +#pragma omp masked taskloop in_reduction(+ : fl) + for (int i = 0; i < 10; ++i) + foo(); + static int m; +#pragma omp taskgroup task_reduction(+:m) +#pragma omp masked taskloop in_reduction(+ : m) // OK + for (int i = 0; i < 10; ++i) + m++; + + return tmain(argc) + tmain(fl); // expected-note {{in instantiation of function template specialization 'tmain<int>' requested here}} expected-note {{in instantiation of function template specialization 'tmain<float>' requested here}} +} diff --git a/clang/test/OpenMP/masked_taskloop_lastprivate_messages.cpp b/clang/test/OpenMP/masked_taskloop_lastprivate_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f38d5f42ffcf3c9bb504d4d9d959d0f528bc7a36 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_lastprivate_messages.cpp @@ -0,0 +1,306 @@ +// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-version=45 -fopenmp %s -Wuninitialized +// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp %s -Wuninitialized + +// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-version=45 -fopenmp-simd %s -Wuninitialized +// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd %s -Wuninitialized + +typedef void **omp_allocator_handle_t; +extern const omp_allocator_handle_t omp_null_allocator; +extern const omp_allocator_handle_t omp_default_mem_alloc; +extern const omp_allocator_handle_t omp_large_cap_mem_alloc; +extern const omp_allocator_handle_t omp_const_mem_alloc; +extern const omp_allocator_handle_t omp_high_bw_mem_alloc; +extern const omp_allocator_handle_t omp_low_lat_mem_alloc; +extern const omp_allocator_handle_t omp_cgroup_mem_alloc; +extern const omp_allocator_handle_t omp_pteam_mem_alloc; +extern const omp_allocator_handle_t omp_thread_mem_alloc; + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; // expected-note 2 {{declared here}} expected-note 2 {{forward declaration of 'S1'}} +extern S1 a; +class S2 { + mutable int a; + +public: + S2() : a(0) {} + S2(S2 
&s2) : a(s2.a) {} + const S2 &operator =(const S2&) const; + S2 &operator =(const S2&); + static float S2s; // expected-note {{static data member is predetermined as shared}} + static const float S2sc; // expected-note {{'S2sc' declared here}} +}; +const float S2::S2sc = 0; +const S2 b; +const S2 ba[5]; +class S3 { + int a; + S3 &operator=(const S3 &s3); // expected-note 2 {{implicitly declared private here}} + +public: + S3() : a(0) {} + S3(S3 &s3) : a(s3.a) {} +}; +const S3 c; // expected-note {{'c' defined here}} +const S3 ca[5]; // expected-note {{'ca' defined here}} +extern const int f; // expected-note {{'f' declared here}} +class S4 { + int a; + S4(); // expected-note 3 {{implicitly declared private here}} + S4(const S4 &s4); + +public: + S4(int v) : a(v) {} +}; +class S5 { + int a; + S5() : a(0) {} // expected-note {{implicitly declared private here}} + +public: + S5(const S5 &s5) : a(s5.a) {} + S5(int v) : a(v) {} +}; +class S6 { + int a; + S6() : a(0) {} + +public: + S6(const S6 &s6) : a(s6.a) {} + S6(int v) : a(v) {} +}; + +S3 h; +#pragma omp threadprivate(h) // expected-note 2 {{defined as threadprivate or thread local}} + +template <class I, class C> +int foomain(int argc, char **argv) { + I e(4); + I g(5); + int i, z; + int &j = i; +#pragma omp parallel +#pragma omp masked taskloop lastprivate // expected-error {{expected '(' after 'lastprivate'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate() // expected-error {{expected expression}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc > 0 ? 
argv[1] : argv[2]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc) allocate , allocate(, allocate(omp_default , allocate(omp_default_mem_alloc, allocate(omp_default_mem_alloc:, allocate(omp_default_mem_alloc: argc, allocate(omp_default_mem_alloc: argv), allocate(argv) // expected-error {{expected '(' after 'allocate'}} expected-error 2 {{expected expression}} expected-error 2 {{expected ')'}} expected-error {{use of undeclared identifier 'omp_default'}} expected-note 2 {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(conditional: argc) lastprivate(conditional: // expected-error 2 {{use of undeclared identifier 'conditional'}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(S1) // expected-error {{'S1' does not refer to a value}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(a, b) // expected-error {{lastprivate variable with incomplete type 'S1'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argv[1]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(z, e, g) // expected-error 2 {{calling a private constructor of class 'S4'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(h) // expected-error {{threadprivate or thread local variable cannot be lastprivate}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel + { + int v = 0; + int i; +#pragma omp masked taskloop allocate(omp_thread_mem_alloc: i) lastprivate(i) // expected-warning {{allocator with the 'thread' trait access has unspecified behavior on 'masked taskloop' directive}} + for (int k = 0; k < argc; ++k) { + i = k; + v += i; + } + } +#pragma omp parallel shared(i) +#pragma omp parallel private(i) +#pragma omp masked taskloop lastprivate(j) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(i) + for (int k = 0; k < argc; ++k) + ++k; + return 0; +} + +void bar(S4 a[2]) { +#pragma omp parallel +#pragma omp masked taskloop lastprivate(a) + for (int i = 0; i < 2; ++i) + foo(); +} + +namespace A { +double x; +#pragma omp threadprivate(x) // expected-note {{defined as threadprivate or thread local}} +} +namespace B { +using A::x; +} + +int main(int argc, char **argv) { + const int d = 5; // expected-note {{'d' defined here}} + const int da[5] = {0}; // expected-note {{'da' defined here}} + S4 e(4); + S5 g(5); + S3 m; + S6 n(2); + int i, z; + int &j = i; +#pragma omp parallel +#pragma omp masked taskloop lastprivate // expected-error {{expected '(' after 'lastprivate'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate() // expected-error {{expected expression}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; 
++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argc, z) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(S1) // expected-error {{'S1' does not refer to a value}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(a, b, c, d, f) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-error 1 {{const-qualified variable without mutable fields cannot be lastprivate}} expected-error 2 {{const-qualified variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(argv[1]) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(2 * 2) // expected-error {{expected variable name}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(ba) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(ca) // expected-error {{const-qualified variable without mutable fields cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(da) // expected-error {{const-qualified variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); + int xa; +#pragma omp parallel +#pragma omp masked taskloop lastprivate(xa) // OK + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(S2::S2s) // expected-error {{shared variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(S2::S2sc) // expected-error {{const-qualified variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop safelen(5) // expected-error {{unexpected OpenMP clause 'safelen' in directive '#pragma omp masked taskloop'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(h) // expected-error {{threadprivate or thread local variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(B::x) // expected-error {{threadprivate or thread local variable cannot be lastprivate}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop private(xa), lastprivate(xa) // expected-error {{private variable cannot be lastprivate}} expected-note {{defined as private}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(i) + for (i = 0; i < 
argc; ++i) + foo(); +#pragma omp parallel private(xa) +#pragma omp masked taskloop lastprivate(xa) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel reduction(+ : xa) +#pragma omp masked taskloop lastprivate(xa) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(j) + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop firstprivate(m) lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} + for (i = 0; i < argc; ++i) + foo(); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(n) firstprivate(n) // OK + for (i = 0; i < argc; ++i) + foo(); + static int si; +#pragma omp masked taskloop lastprivate(si) // OK + for (i = 0; i < argc; ++i) + si = i + 1; + return foomain<S4, S5>(argc, argv); // expected-note {{in instantiation of function template specialization 'foomain<S4, S5>' requested here}} +} diff --git a/clang/test/OpenMP/masked_taskloop_loop_messages.cpp b/clang/test/OpenMP/masked_taskloop_loop_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b9c90a65df0e8443425bc3e9e176eb164db6855e --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_loop_messages.cpp @@ -0,0 +1,741 @@ +// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -fexceptions -fcxx-exceptions -verify=expected,omp4 %s -Wuninitialized +// RUN: %clang_cc1 -fsyntax-only -fopenmp -x c++ -std=c++11 -fexceptions -fcxx-exceptions -verify=expected,omp5 %s -Wuninitialized + +// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -fexceptions -fcxx-exceptions -verify=expected,omp4 %s -Wuninitialized +// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -x c++ -std=c++11 -fexceptions -fcxx-exceptions -verify=expected,omp5 %s -Wuninitialized + +class S { + int a; + S() : a(0) {} + +public: + S(int v) : a(v) {} + S(const S &s) : a(s.a) {} +}; + +static int sii; +// expected-note@+1 {{defined as threadprivate or thread local}} +#pragma omp threadprivate(sii) +static int globalii; + +// Currently, we cannot use "0" for global register variables. 
+// register int reg0 __asm__("0"); +int reg0; + +int test_iteration_spaces() { + const int N = 100; + float a[N], b[N], c[N]; + int ii, jj, kk; + float fii; + double dii; + register int reg; // expected-warning {{'register' storage class specifier is deprecated}} +#pragma omp parallel +#pragma omp masked taskloop + for (int i = 0; i < 10; i += 1) { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +#pragma omp masked taskloop + for (char i = 0; i < 10; i++) { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +#pragma omp masked taskloop + for (char i = 0; i < 10; i += '\1') { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +#pragma omp masked taskloop + for (long long i = 0; i < 10; i++) { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +// expected-error@+2 {{expression must have integral or unscoped enumeration type, not 'double'}} +#pragma omp masked taskloop + for (long long i = 0; i < 10; i += 1.5) { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +#pragma omp masked taskloop + for (long long i = 0; i < 'z'; i += 1u) { + c[i] = a[i] + b[i]; + } +#pragma omp parallel +// expected-error@+2 {{variable must be of integer or random access iterator type}} +#pragma omp masked taskloop + for (float fi = 0; fi < 10.0; fi++) { + c[(int)fi] = a[(int)fi] + b[(int)fi]; + } +#pragma omp parallel +// expected-error@+2 {{variable must be of integer or random access iterator type}} +#pragma omp masked taskloop + for (double fi = 0; fi < 10.0; fi++) { + c[(int)fi] = a[(int)fi] + b[(int)fi]; + } +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (int &ref = ii; ref < 10; ref++) { + } +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (int i; i < 10; i++) + c[i] = a[i]; + +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (int i = 0, j = 0; i < 10; ++i) + c[i] = a[i]; + +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (; ii < 10; ++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-warning@+3 {{expression result unused}} +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (ii + 1; ii < 10; ++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (c[ii] = 0; ii < 10; ++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// Ok to skip parentheses. 
+#pragma omp masked taskloop + for (((ii)) = 0; ii < 10; ++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'i'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'i'}} +#pragma omp masked taskloop + for (int i = 0; i; i++) + c[i] = a[i]; + +#pragma omp parallel +// omp4-error@+3 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'i'}} omp5-error@+3 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'i'}} +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'i'}} +#pragma omp masked taskloop + for (int i = 0; jj < kk; ii++) + c[i] = a[i]; + +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'i'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'i'}} +#pragma omp masked taskloop + for (int i = 0; !!i; i++) + c[i] = a[i]; + +// Ok +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'i'}} +#pragma omp masked taskloop + for (int i = 0; i != 1; i++) + c[i] = a[i]; + +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'i'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'i'}} +#pragma omp masked taskloop + for (int i = 0;; i++) + c[i] = a[i]; + +#pragma omp parallel +// Ok. +#pragma omp masked taskloop + for (int i = 11; i > 10; i--) + c[i] = a[i]; + +#pragma omp parallel +// Ok. +#pragma omp masked taskloop + for (int i = 0; i < 10; ++i) + c[i] = a[i]; + +#pragma omp parallel +// Ok. +#pragma omp masked taskloop + for (ii = 0; ii < 10; ++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ++jj) + c[ii] = a[jj]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ++++ii) + c[ii] = a[ii]; + +#pragma omp parallel +// Ok but undefined behavior (in general, cannot check that incr +// is really loop-invariant). +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii + ii) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{expression must have integral or unscoped enumeration type, not 'float'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii + 1.0f) + c[ii] = a[ii]; + +#pragma omp parallel +// Ok - step was converted to integer type. 
+#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii + (int)1.1f) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; jj = ii + 2) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-warning@+3 {{relational comparison result unused}} +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii<10; jj> kk + 2) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10;) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-warning@+3 {{expression result unused}} +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; !ii) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii ? ++ii : ++jj) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'ii'}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii < 10) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii + 0) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; ii = ii + (int)(0.8 - 0.45)) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; (ii) < 10; ii -= 25) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; (ii < 10); ii -= 0) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be negative due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to decrease on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; ii > 10; (ii += 0)) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; ii < 10; (ii) = (1 - 1) + (ii)) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be negative due to this condition}} +// 
expected-error@+2 {{increment expression must cause 'ii' to decrease on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for ((ii = 0); ii > 10; (ii -= 0)) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'ii' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (ii = 0; (ii < 10); (ii -= 0)) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-note@+2 {{defined as firstprivate}} +// expected-error@+2 {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be firstprivate, predetermined as private}} +#pragma omp masked taskloop firstprivate(ii) + for (ii = 0; ii < 10; ii++) + c[ii] = a[ii]; + +#pragma omp parallel +// expected-error@+1 {{unexpected OpenMP clause 'linear' in directive '#pragma omp masked taskloop'}} +#pragma omp masked taskloop linear(ii) + for (ii = 0; ii < 10; ii++) + c[ii] = a[ii]; + +#pragma omp parallel +#pragma omp masked taskloop private(ii) + for (ii = 0; ii < 10; ii++) + c[ii] = a[ii]; + +#pragma omp parallel +#pragma omp masked taskloop lastprivate(ii) + for (ii = 0; ii < 10; ii++) + c[ii] = a[ii]; + +#pragma omp parallel + { +// expected-error@+2 {{loop iteration variable in the associated loop of 'omp masked taskloop' directive may not be threadprivate or thread local, predetermined as private}} +#pragma omp masked taskloop + for (sii = 0; sii < 10; sii += 1) + c[sii] = a[sii]; + } + +#pragma omp parallel + { +#pragma omp masked taskloop + for (reg0 = 0; reg0 < 10; reg0 += 1) + c[reg0] = a[reg0]; + } + +#pragma omp parallel + { +#pragma omp masked taskloop + for (reg = 0; reg < 10; reg += 1) + c[reg] = a[reg]; + } + +#pragma omp parallel + { +#pragma omp masked taskloop + for (globalii = 0; globalii < 10; globalii += 1) + c[globalii] = a[globalii]; + } + +#pragma omp parallel + { +#pragma omp masked taskloop collapse(2) + for (ii = 0; ii < 10; ii += 1) + for (globalii = 0; globalii < 10; globalii += 1) + c[globalii] += a[globalii] + ii; + } + +#pragma omp parallel +// omp4-error@+2 {{statement after '#pragma omp masked taskloop' must be a for loop}} +#pragma omp masked taskloop + for (auto &item : a) { + item = item + 1; + } + +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be positive due to this condition}} +// expected-error@+2 {{increment expression must cause 'i' to increase on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (unsigned i = 9; i < 10; i--) { + c[i] = a[i] + b[i]; + } + + int(*lb)[4] = nullptr; +#pragma omp parallel +#pragma omp masked taskloop + for (int(*p)[4] = lb; p < lb + 8; ++p) { + } + +#pragma omp parallel +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (int a{0}; a < 10; ++a) { + } + + return 0; +} + +// Iterators allowed in openmp for-loops. 
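// (Illustrative note, not part of the tested change: for an iterator type to
// drive one of these loops it needs operator++/operator--, a relational
// comparison for the loop condition, an 'operator-' the compiler can call to
// compute the trip count, and iterator_traits exposing 'difference_type' and
// a random_access_iterator_tag category. The helper classes below provide or
// deliberately omit pieces of that surface to trigger each diagnostic.)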
+namespace std { +struct random_access_iterator_tag {}; +template <class Iter> +struct iterator_traits { + typedef typename Iter::difference_type difference_type; + typedef typename Iter::iterator_category iterator_category; +}; +template <class Iter> +typename iterator_traits<Iter>::difference_type +distance(Iter first, Iter last) { return first - last; } +} +class Iter0 { +public: + Iter0() {} + Iter0(const Iter0 &) {} + Iter0 operator++() { return *this; } + Iter0 operator--() { return *this; } + bool operator<(Iter0 a) { return true; } +}; +// expected-note@+2 {{candidate function not viable: no known conversion from 'GoodIter' to 'Iter0' for 1st argument}} +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'Iter0' for 1st argument}} +int operator-(Iter0 a, Iter0 b) { return 0; } +class Iter1 { +public: + Iter1(float f = 0.0f, double d = 0.0) {} + Iter1(const Iter1 &) {} + Iter1 operator++() { return *this; } + Iter1 operator--() { return *this; } + bool operator<(Iter1 a) { return true; } + bool operator>=(Iter1 a) { return false; } +}; +class GoodIter { +public: + GoodIter() {} + GoodIter(const GoodIter &) {} + GoodIter(int fst, int snd) {} + GoodIter &operator=(const GoodIter &that) { return *this; } + GoodIter &operator=(const Iter0 &that) { return *this; } + GoodIter &operator+=(int x) { return *this; } + GoodIter &operator-=(int x) { return *this; } + explicit GoodIter(void *) {} + GoodIter operator++() { return *this; } + GoodIter operator--() { return *this; } + bool operator!() { return true; } + bool operator<(GoodIter a) { return true; } + bool operator<=(GoodIter a) { return true; } + bool operator>=(GoodIter a) { return false; } + typedef int difference_type; + typedef std::random_access_iterator_tag iterator_category; +}; +// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}} +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}} +int operator-(GoodIter a, GoodIter b) { return 0; } +// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}} +GoodIter operator-(GoodIter a) { return a; } +// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}} +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}} +GoodIter operator-(GoodIter a, int v) { return GoodIter(); } +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}} +GoodIter operator+(GoodIter a, int v) { return GoodIter(); } +// expected-note@+2 {{candidate function not viable: no known conversion from 'GoodIter' to 'int' for 1st argument}} +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'int' for 1st argument}} +GoodIter operator-(int v, GoodIter a) { return GoodIter(); } +// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 1st argument}} +GoodIter operator+(int v, GoodIter a) { return GoodIter(); } + +int test_with_random_access_iterator() { + GoodIter begin, end; + Iter0 begin0, end0; +#pragma omp parallel +#pragma omp masked taskloop + for (GoodIter I = begin; I < end; ++I) + ++I; +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma 
omp masked taskloop + for (GoodIter &I = begin; I < end; ++I) + ++I; +#pragma omp parallel +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; --I) + ++I; +#pragma omp parallel +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (GoodIter I(begin); I < end; ++I) + ++I; +#pragma omp parallel +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (GoodIter I(nullptr); I < end; ++I) + ++I; +#pragma omp parallel +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (GoodIter I(0); I < end; ++I) + ++I; +#pragma omp parallel +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (GoodIter I(1, 2); I < end; ++I) + ++I; +#pragma omp parallel +#pragma omp masked taskloop + for (begin = GoodIter(0); begin < end; ++begin) + ++begin; +// expected-error@+4 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}} +// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}} +#pragma omp parallel +#pragma omp masked taskloop + for (begin = begin0; begin < end; ++begin) + ++begin; +#pragma omp parallel +// expected-error@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (++begin; begin < end; ++begin) + ++begin; +#pragma omp parallel +#pragma omp masked taskloop + for (begin = end; begin < end; ++begin) + ++begin; +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'I'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'I'}} +#pragma omp masked taskloop + for (GoodIter I = begin; I - I; ++I) + ++I; +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'I'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'I'}} +#pragma omp masked taskloop + for (GoodIter I = begin; begin < end; ++I) + ++I; +#pragma omp parallel +// omp4-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', or '>=') of loop variable 'I'}} omp5-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'I'}} +#pragma omp masked taskloop + for (GoodIter I = begin; !I; ++I) + ++I; +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be negative due to this condition}} +// expected-error@+2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; I = I + 1) + ++I; +#pragma omp parallel +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; I = I - 1) + ++I; +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'I'}} +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; I = -I) + ++I; +#pragma 
omp parallel +// expected-note@+3 {{loop step is expected to be negative due to this condition}} +// expected-error@+2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; I = 2 + I) + ++I; +#pragma omp parallel +// expected-error@+2 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'I'}} +#pragma omp masked taskloop + for (GoodIter I = begin; I >= end; I = 2 - I) + ++I; +// In the following example, we cannot update the loop variable using '+=' +// expected-error@+3 {{invalid operands to binary expression ('Iter0' and 'int')}} +#pragma omp parallel +#pragma omp masked taskloop + for (Iter0 I = begin0; I < end0; ++I) + ++I; +#pragma omp parallel +// Initializer is constructor without params. +// expected-error@+3 {{invalid operands to binary expression ('Iter0' and 'int')}} +// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}} +#pragma omp masked taskloop + for (Iter0 I; I < end0; ++I) + ++I; + Iter1 begin1, end1; +// expected-error@+4 {{invalid operands to binary expression ('Iter1' and 'Iter1')}} +// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}} +#pragma omp parallel +#pragma omp masked taskloop + for (Iter1 I = begin1; I < end1; ++I) + ++I; +#pragma omp parallel +// expected-note@+3 {{loop step is expected to be negative due to this condition}} +// expected-error@+2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}} +#pragma omp masked taskloop + for (Iter1 I = begin1; I >= end1; ++I) + ++I; +#pragma omp parallel +// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}} +// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}} +// Initializer is constructor with all default params. 
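+// (The diagnostics below all stem from the same two facts: 'Iter1 I;' is not
+// the 'var = init' shape required by the OpenMP canonical loop form, and
+// Iter1 provides no 'operator-' compatible with the 'float' bound, so no
+// trip count can be computed.)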
+// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
+#pragma omp masked taskloop
+  for (Iter1 I; I < end1; ++I) {
+  }
+  return 0;
+}
+
+template <typename IT, int ST>
+class TC {
+public:
+  int dotest_lt(IT begin, IT end) {
+#pragma omp parallel
+// expected-note@+3 {{loop step is expected to be positive due to this condition}}
+// expected-error@+2 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
+#pragma omp masked taskloop
+    for (IT I = begin; I < end; I = I + ST) {
+      ++I;
+    }
+#pragma omp parallel
+// expected-note@+3 {{loop step is expected to be positive due to this condition}}
+// expected-error@+2 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
+#pragma omp masked taskloop
+    for (IT I = begin; I <= end; I += ST) {
+      ++I;
+    }
+#pragma omp parallel
+#pragma omp masked taskloop
+    for (IT I = begin; I < end; ++I) {
+      ++I;
+    }
+  }
+
+  static IT step() {
+    return IT(ST);
+  }
+};
+template <typename IT, int ST = 0>
+int dotest_gt(IT begin, IT end) {
+#pragma omp parallel
+// expected-note@+3 2 {{loop step is expected to be negative due to this condition}}
+// expected-error@+2 2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}}
+#pragma omp masked taskloop
+  for (IT I = begin; I >= end; I = I + ST) {
+    ++I;
+  }
+#pragma omp parallel
+// expected-note@+3 2 {{loop step is expected to be negative due to this condition}}
+// expected-error@+2 2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}}
+#pragma omp masked taskloop
+  for (IT I = begin; I >= end; I += ST) {
+    ++I;
+  }
+
+#pragma omp parallel
+// expected-note@+3 {{loop step is expected to be negative due to this condition}}
+// expected-error@+2 {{increment expression must cause 'I' to decrease on each iteration of OpenMP for loop}}
+#pragma omp masked taskloop
+  for (IT I = begin; I >= end; ++I) {
+    ++I;
+  }
+
+#pragma omp parallel
+#pragma omp masked taskloop
+  for (IT I = begin; I < end; I += TC<int, ST>::step()) {
+    ++I;
+  }
+}
+
+void test_with_template() {
+  GoodIter begin, end;
+  TC<GoodIter, 100> t1;
+  TC<GoodIter, -100> t2;
+  t1.dotest_lt(begin, end);
+  t2.dotest_lt(begin, end); // expected-note {{in instantiation of member function 'TC<GoodIter, -100>::dotest_lt' requested here}}
+  dotest_gt(begin, end);    // expected-note {{in instantiation of function template specialization 'dotest_gt<GoodIter, 0>' requested here}}
+  dotest_gt(0, 100);        // expected-note {{in instantiation of function template specialization 'dotest_gt<int, 0>' requested here}}
+}
+
+void test_loop_break() {
+  const int N = 100;
+  float a[N], b[N], c[N];
+#pragma omp parallel
+#pragma omp masked taskloop
+  for (int i = 0; i < 10; i++) {
+    c[i] = a[i] + b[i];
+    for (int j = 0; j < 10; ++j) {
+      if (a[i] > b[j])
+        break; // OK in nested loop
+    }
+    switch (i) {
+    case 1:
+      b[i]++;
+      break;
+    default:
+      break;
+    }
+    if (c[i] > 10)
+      break; // expected-error {{'break' statement cannot be used in OpenMP for loop}}
+
+    if (c[i] > 11)
+      break; // expected-error {{'break' statement cannot be used in OpenMP for loop}}
+  }
+
+#pragma omp parallel
+#pragma omp masked taskloop
+  for (int i = 0; i < 10; i++) {
+    for (int j = 0; j < 10; j++) {
+      c[i] = a[i] + b[i];
+      if (c[i] > 10) {
+        if (c[i] < 20) {
+          break; // OK
+        }
+      }
+    }
+  }
+}
+
+void test_loop_eh() {
+  const int N = 100;
+  float a[N], b[N], c[N];
+#pragma omp parallel
+#pragma omp masked taskloop
+  for (int i = 0; i < 10; i++) {
+    c[i] = a[i] + b[i];
+    try {
+      for (int j = 0; j < 10; ++j) {
+        if (a[i] > b[j])
+ throw a[i]; + } + throw a[i]; + } catch (float f) { + if (f > 0.1) + throw a[i]; + return; // expected-error {{cannot return from OpenMP region}} + } + switch (i) { + case 1: + b[i]++; + break; + default: + break; + } + for (int j = 0; j < 10; j++) { + if (c[i] > 10) + throw c[i]; + } + } + if (c[9] > 10) + throw c[9]; // OK + +#pragma omp parallel +#pragma omp masked taskloop + for (int i = 0; i < 10; ++i) { + struct S { + void g() { throw 0; } + }; + } +} + +void test_loop_firstprivate_lastprivate() { + S s(4); +#pragma omp parallel +#pragma omp masked taskloop lastprivate(s) firstprivate(s) + for (int i = 0; i < 16; ++i) + ; +} + diff --git a/clang/test/OpenMP/masked_taskloop_num_tasks_messages.cpp b/clang/test/OpenMP/masked_taskloop_num_tasks_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7cbda03b04bb22c64d31bd0cf9d8dc72247ddf97 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_num_tasks_messages.cpp @@ -0,0 +1,103 @@ +// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; // expected-note {{declared here}} + +template // expected-note {{declared here}} +int tmain(T argc, S **argv) { + T z; + #pragma omp masked taskloop num_tasks // expected-error {{expected '(' after 'num_tasks'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks () // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc > 0 ? 
argv[1][0] : argv[2][argc] + z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (foobool(argc)), num_tasks (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'num_tasks' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (S) // expected-error {{'S' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(0) // expected-error {{argument to 'num_tasks' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(-1) // expected-error {{argument to 'num_tasks' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(argc) grainsize(argc) // expected-error {{'grainsize' and 'num_tasks' clause are mutually exclusive and may not appear on the same directive}} expected-note {{'num_tasks' clause is specified here}} + for (int i = 0; i < 10; ++i) + foo(); + + return 0; +} + +int main(int argc, char **argv) { + int z; + #pragma omp masked taskloop num_tasks // expected-error {{expected '(' after 'num_tasks'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks () // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc > 0 ? 
argv[1][0] : argv[2][argc] - z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (foobool(argc)), num_tasks (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'num_tasks' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks (1 0) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(if(tmain(argc, argv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(0) // expected-error {{argument to 'num_tasks' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(-1) // expected-error {{argument to 'num_tasks' clause must be a strictly positive integer value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop num_tasks(argc) grainsize(argc) // expected-error {{'grainsize' and 'num_tasks' clause are mutually exclusive and may not appear on the same directive}} expected-note {{'num_tasks' clause is specified here}} + for (int i = 0; i < 10; ++i) + foo(); + + return tmain(argc, argv); +} diff --git a/clang/test/OpenMP/masked_taskloop_priority_messages.cpp b/clang/test/OpenMP/masked_taskloop_priority_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f0ac23cff8ab88d557cd329721302bd247e62460 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_priority_messages.cpp @@ -0,0 +1,97 @@ +// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized + +void foo() { +} + +bool foobool(int argc) { + return argc; +} + +struct S1; // expected-note {{declared here}} + +template // expected-note {{declared here}} +int tmain(T argc, S **argv) { + T z; + #pragma omp masked taskloop priority // expected-error {{expected '(' after 'priority'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority () // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc > 0 ? 
argv[1][0] : argv[2][argc] + z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (foobool(argc)), priority (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'priority' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (S) // expected-error {{'S' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority(0) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority(-1) // expected-error {{argument to 'priority' clause must be a non-negative integer value}} + for (int i = 0; i < 10; ++i) + foo(); + + return 0; +} + +int main(int argc, char **argv) { + int z; + #pragma omp masked taskloop priority // expected-error {{expected '(' after 'priority'}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority () // expected-error {{expected expression}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc)) // expected-warning {{extra tokens at the end of '#pragma omp masked taskloop' are ignored}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc > 0 ? argv[1][0] : argv[2][argc] - z) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (foobool(argc)), priority (true) // expected-error {{directive '#pragma omp masked taskloop' cannot contain more than one 'priority' clause}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (S1) // expected-error {{'S1' does not refer to a value}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (argc argc) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority (1 0) // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority(if(tmain(argc, argv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority(0) + for (int i = 0; i < 10; ++i) + foo(); + #pragma omp masked taskloop priority(-1) // expected-error {{argument to 'priority' clause must be a non-negative integer value}} + for (int i = 0; i < 10; ++i) + foo(); + + return tmain(argc, argv); +} diff --git a/clang/test/OpenMP/masked_taskloop_private_messages.cpp b/clang/test/OpenMP/masked_taskloop_private_messages.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9ab21b631f3d101f2f0485d1db18f0a5017ebd17 --- /dev/null +++ b/clang/test/OpenMP/masked_taskloop_private_messages.cpp @@ -0,0 +1,260 @@ +// RUN: %clang_cc1 -verify -fopenmp %s -Wuninitialized + +// RUN: %clang_cc1 -verify -fopenmp-simd %s -Wuninitialized + +typedef void **omp_allocator_handle_t; +extern const omp_allocator_handle_t omp_null_allocator; +extern const 
omp_allocator_handle_t omp_default_mem_alloc;
+extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
+extern const omp_allocator_handle_t omp_const_mem_alloc;
+extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
+extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
+extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
+extern const omp_allocator_handle_t omp_pteam_mem_alloc;
+extern const omp_allocator_handle_t omp_thread_mem_alloc;
+
+void foo() {
+}
+
+bool foobool(int argc) {
+  return argc;
+}
+
+struct S1; // expected-note 2 {{declared here}} expected-note 2 {{forward declaration of 'S1'}}
+extern S1 a;
+class S2 {
+  mutable int a;
+
+public:
+  S2() : a(0) {}
+};
+const S2 b;
+const S2 ba[5];
+class S3 {
+  int a;
+
+public:
+  S3() : a(0) {}
+};
+const S3 ca[5];
+class S4 {
+  int a;
+  S4(); // expected-note {{implicitly declared private here}}
+
+public:
+  S4(int v) : a(v) {
+#pragma omp masked taskloop private(a) private(this->a)
+    for (int k = 0; k < v; ++k)
+      ++this->a;
+  }
+};
+class S5 {
+  int a;
+  S5() : a(0) {} // expected-note {{implicitly declared private here}}
+
+public:
+  S5(int v) : a(v) {}
+  S5 &operator=(S5 &s) {
+#pragma omp masked taskloop private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
+    for (int k = 0; k < s.a; ++k)
+      ++s.a;
+    return *this;
+  }
+};
+
+template <typename T>
+class S6 {
+public:
+  T a;
+
+  S6() : a(0) {}
+  S6(T v) : a(v) {
+#pragma omp masked taskloop private(a) private(this->a) allocate(omp_thread_mem_alloc: a) // expected-warning {{allocator with the 'thread' trait access has unspecified behavior on 'masked taskloop' directive}}
+    for (int k = 0; k < v; ++k)
+      ++this->a;
+  }
+  S6 &operator=(S6 &s) {
+#pragma omp masked taskloop private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
+    for (int k = 0; k < s.a; ++k)
+      ++s.a;
+    return *this;
+  }
+};
+
+template <typename T>
+class S7 : public T {
+  T a;
+  S7() : a(0) {}
+
+public:
+  S7(T v) : a(v) {
+#pragma omp masked taskloop private(a) private(this->a) private(T::a)
+    for (int k = 0; k < a.a; ++k)
+      ++this->a.a;
+  }
+  S7 &operator=(S7 &s) {
+#pragma omp masked taskloop private(a) private(this->a) private(s.a) private(s.T::a) // expected-error 2 {{expected variable name or data member of current class}}
+    for (int k = 0; k < s.a.a; ++k)
+      ++s.a.a;
+    return *this;
+  }
+};
+
+S3 h;
+#pragma omp threadprivate(h) // expected-note 2 {{defined as threadprivate or thread local}}
+
+template <class I, class C>
+int foomain(I argc, C **argv) {
+  I e(4);
+  I g(5);
+  int i, z;
+  int &j = i;
+#pragma omp masked taskloop private // expected-error {{expected '(' after 'private'}}
+  for (int k = 0; k < argc; ++k)
+    ++k;
+#pragma omp masked taskloop private( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
+  for (int k = 0; k < argc; ++k)
+    ++k;
+#pragma omp masked taskloop private() // expected-error {{expected expression}}
+  for (int k = 0; k < argc; ++k)
+    ++k;
+#pragma omp masked taskloop private(argc // expected-error {{expected ')'}} expected-note {{to match this '('}}
+  for (int k = 0; k < argc; ++k)
+    ++k;
+#pragma omp masked taskloop private(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
+  for (int k = 0; k < argc; ++k)
+    ++k;
+#pragma omp masked taskloop private(argc > 0 ?
argv[1] : argv[2]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argc) allocate , allocate(, allocate(omp_default , allocate(omp_default_mem_alloc, allocate(omp_default_mem_alloc:, allocate(omp_default_mem_alloc: argc, allocate(omp_default_mem_alloc: argv), allocate(argv) // expected-error {{expected '(' after 'allocate'}} expected-error 2 {{expected expression}} expected-error 2 {{expected ')'}} expected-error {{use of undeclared identifier 'omp_default'}} expected-note 2 {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(S1) // expected-error {{'S1' does not refer to a value}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(a, b) // expected-error {{private variable with incomplete type 'S1'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argv[1]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(e, g, z) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(h) // expected-error {{threadprivate or thread local variable cannot be private}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop shared(i) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel + { + int v = 0; + int i; +#pragma omp masked taskloop private(i) + for (int k = 0; k < argc; ++k) { + i = k; + v += i; + } + } +#pragma omp parallel shared(i) +#pragma omp parallel private(i) +#pragma omp masked taskloop private(j) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(i) + for (int k = 0; k < argc; ++k) + ++k; + return 0; +} + +void bar(S4 a[2]) { +#pragma omp parallel +#pragma omp masked taskloop private(a) + for (int i = 0; i < 2; ++i) + foo(); +} + +namespace A { +double x; +#pragma omp threadprivate(x) // expected-note {{defined as threadprivate or thread local}} +} +namespace B { +using A::x; +} + +int main(int argc, char **argv) { + S4 e(4); + S5 g(5); + S6 s6(0.0) , s6_0(1.0); // expected-note {{in instantiation of member function 'S6::S6' requested here}} + S7 > s7(0.0) , s7_0(1.0); + int i, z; + int &j = i; +#pragma omp masked taskloop private // expected-error {{expected '(' after 'private'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private() // expected-error {{expected expression}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argc // expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argc > 0 ? 
argv[1] : argv[2]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argc) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(S1) // expected-error {{'S1' does not refer to a value}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(a, b) // expected-error {{private variable with incomplete type 'S1'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(argv[1]) // expected-error {{expected variable name}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(h) // expected-error {{threadprivate or thread local variable cannot be private}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(B::x) // expected-error {{threadprivate or thread local variable cannot be private}} + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop shared(i) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp parallel + { + int i; +#pragma omp masked taskloop private(i) + for (int k = 0; k < argc; ++k) + ++k; + } +#pragma omp parallel shared(i) +#pragma omp parallel private(i) +#pragma omp masked taskloop private(j) + for (int k = 0; k < argc; ++k) + ++k; +#pragma omp masked taskloop private(i, z) + for (int k = 0; k < argc; ++k) + ++k; + static int si; +#pragma omp masked taskloop private(si) // OK + for(int k = 0; k < argc; ++k) + si = k + 1; + + s6 = s6_0; // expected-note {{in instantiation of member function 'S6::operator=' requested here}} + s7 = s7_0; // expected-note {{in instantiation of member function 'S7>::operator=' requested here}} + return foomain(argc, argv); // expected-note {{in instantiation of function template specialization 'foomain' requested here}} +} + diff --git a/clang/test/Parser/cxx11-user-defined-literals.cpp b/clang/test/Parser/cxx11-user-defined-literals.cpp index 143cdb502c04125d26709d52683cdc05868e2d65..1a7e7805882299d0b4f036b5af5c7d9f8107567c 100644 --- a/clang/test/Parser/cxx11-user-defined-literals.cpp +++ b/clang/test/Parser/cxx11-user-defined-literals.cpp @@ -131,6 +131,7 @@ int operator""_\u212e""_\U0000212e""_℮""(const char*, size_t); int operator""_\U0000212e""_℮""_\u212e""(const char*, size_t); int operator""_\u{212f}(char); +int operator""_\N{SCRIPT SMALL E}(char); int mix_ucn_utf8 = ""_℮""_\u212e""_\U0000212e""; diff --git a/clang/test/Preprocessor/init-aarch64.c b/clang/test/Preprocessor/init-aarch64.c index 66cab8b1f8d04054f974fbac2a39d1ed045986ec..3c36793d824a5c18ede67196deda5e04a4673cca 100644 --- a/clang/test/Preprocessor/init-aarch64.c +++ b/clang/test/Preprocessor/init-aarch64.c @@ -104,18 +104,20 @@ // AARCH64-NEXT: #define __FLT_MIN_EXP__ (-125) // AARCH64-NEXT: #define __FLT_MIN__ 1.17549435e-38F // AARCH64-NEXT: #define __FLT_RADIX__ 2 +// AARCH64-NEXT: #define __FP_FAST_FMA 1 +// AARCH64-NEXT: #define __FP_FAST_FMAF 1 // AARCH64-NEXT: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 1 // AARCH64-NEXT: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 1 // AARCH64-NEXT: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 // AARCH64-NEXT: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 1 // AARCH64_CXX-NEXT: #define __GLIBCXX_BITSIZE_INT_N_0 128 // AARCH64_CXX-NEXT: #define __GLIBCXX_TYPE_INT_N_0 __int128 -// AARCH64-NEXT: #define __INT16_C_SUFFIX__ +// 
AARCH64-NEXT: #define __INT16_C_SUFFIX__ // AARCH64-NEXT: #define __INT16_FMTd__ "hd" // AARCH64-NEXT: #define __INT16_FMTi__ "hi" // AARCH64-NEXT: #define __INT16_MAX__ 32767 // AARCH64-NEXT: #define __INT16_TYPE__ short -// AARCH64-NEXT: #define __INT32_C_SUFFIX__ +// AARCH64-NEXT: #define __INT32_C_SUFFIX__ // AARCH64-NEXT: #define __INT32_FMTd__ "d" // AARCH64-NEXT: #define __INT32_FMTi__ "i" // AARCH64-NEXT: #define __INT32_MAX__ 2147483647 @@ -125,7 +127,7 @@ // AARCH64-NEXT: #define __INT64_FMTi__ "li" // AARCH64-NEXT: #define __INT64_MAX__ 9223372036854775807L // AARCH64-NEXT: #define __INT64_TYPE__ long int -// AARCH64-NEXT: #define __INT8_C_SUFFIX__ +// AARCH64-NEXT: #define __INT8_C_SUFFIX__ // AARCH64-NEXT: #define __INT8_FMTd__ "hhd" // AARCH64-NEXT: #define __INT8_FMTi__ "hhi" // AARCH64-NEXT: #define __INT8_MAX__ 127 @@ -253,7 +255,7 @@ // AARCH64-NEXT: #define __STDC_UTF_32__ 1 // AARCH64_C: #define __STDC_VERSION__ 201710L // AARCH64-NEXT: #define __STDC__ 1 -// AARCH64-NEXT: #define __UINT16_C_SUFFIX__ +// AARCH64-NEXT: #define __UINT16_C_SUFFIX__ // AARCH64-NEXT: #define __UINT16_FMTX__ "hX" // AARCH64-NEXT: #define __UINT16_FMTo__ "ho" // AARCH64-NEXT: #define __UINT16_FMTu__ "hu" @@ -274,7 +276,7 @@ // AARCH64-NEXT: #define __UINT64_FMTx__ "lx" // AARCH64-NEXT: #define __UINT64_MAX__ 18446744073709551615UL // AARCH64-NEXT: #define __UINT64_TYPE__ long unsigned int -// AARCH64-NEXT: #define __UINT8_C_SUFFIX__ +// AARCH64-NEXT: #define __UINT8_C_SUFFIX__ // AARCH64-NEXT: #define __UINT8_FMTX__ "hhX" // AARCH64-NEXT: #define __UINT8_FMTo__ "hho" // AARCH64-NEXT: #define __UINT8_FMTu__ "hhu" @@ -344,7 +346,7 @@ // AARCH64-NEXT: #define __UINT_LEAST8_FMTx__ "hhx" // AARCH64-NEXT: #define __UINT_LEAST8_MAX__ 255 // AARCH64-NEXT: #define __UINT_LEAST8_TYPE__ unsigned char -// AARCH64-NEXT: #define __USER_LABEL_PREFIX__ +// AARCH64-NEXT: #define __USER_LABEL_PREFIX__ // AARCH64-NEXT: #define __VERSION__ "{{.*}}" // AARCH64-NEXT: #define __WCHAR_MAX__ 4294967295U // AARCH64-NEXT: #define __WCHAR_TYPE__ unsigned int diff --git a/clang/test/Preprocessor/pragma_microsoft.c b/clang/test/Preprocessor/pragma_microsoft.c index ab60902783573ae7f01bdd471c98521727413319..057f5ea006e868003b1dd5dd5ef138354b0b4340 100644 --- a/clang/test/Preprocessor/pragma_microsoft.c +++ b/clang/test/Preprocessor/pragma_microsoft.c @@ -228,7 +228,13 @@ void pragma_function_foo() { #pragma optimize("g" // expected-warning{{expected ',' in '#pragma optimize'}} #pragma optimize("g", // expected-warning{{missing argument to '#pragma optimize'; expected 'on' or 'off'}} #pragma optimize("g",xyz // expected-warning{{unexpected argument 'xyz' to '#pragma optimize'; expected 'on' or 'off'}} -#pragma optimize("g",on) // expected-warning{{#pragma optimize' is not supported}} +#pragma optimize("g",on) // expected-warning{{unexpected argument 'g' to '#pragma optimize'; expected ""}} +#pragma optimize("",on) // no-warning +#pragma optimize("", on) asdf // expected-warning{{extra tokens at end of '#pragma optimize'}} + +void pragma_optimize_foo() { +#pragma optimize("", on) // expected-error {{'#pragma optimize' can only appear at file scope}} +} #pragma execution_character_set // expected-warning {{expected '('}} #pragma execution_character_set( // expected-warning {{expected 'push' or 'pop'}} diff --git a/clang/test/Preprocessor/ucn-pp-identifier.c b/clang/test/Preprocessor/ucn-pp-identifier.c index 1d91ad422b6970078b51777da291c0bcfd112362..44d3d9e0331385559a109db43abc2aa5db85e224 100644 --- 
a/clang/test/Preprocessor/ucn-pp-identifier.c +++ b/clang/test/Preprocessor/ucn-pp-identifier.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 %s -fsyntax-only -std=c99 -pedantic -verify -Wundef // RUN: %clang_cc1 %s -fsyntax-only -x c++ -pedantic -verify -Wundef +// RUN: %clang_cc1 %s -fsyntax-only -x c++ -pedantic -verify -Wundef -ftrigraphs -DTRIGRAPHS=1 // RUN: not %clang_cc1 %s -fsyntax-only -std=c99 -pedantic -Wundef 2>&1 | FileCheck -strict-whitespace %s #define \u00FC @@ -29,9 +30,14 @@ // Make sure we reject disallowed UCNs #define \ufffe // expected-error {{macro name must be an identifier}} -#define \U10000000 // expected-error {{macro name must be an identifier}} -#define \u0061 // expected-error {{character 'a' cannot be specified by a universal character name}} expected-error {{macro name must be an identifier}} -#define \u{fffe} // expected-error {{macro name must be an identifier}} expected-warning {{Clang extension}} +#define \U10000000 // expected-error {{macro name must be an identifier}} +#define \u0061 // expected-error {{character 'a' cannot be specified by a universal character name}} expected-error {{macro name must be an identifier}} +#define \u{fffe} // expected-error {{macro name must be an identifier}} expected-warning {{Clang extension}} +#define \N{ALERT} // expected-error {{universal character name refers to a control character}} \ + // expected-error {{macro name must be an identifier}} \ + // expected-warning {{Clang extension}} +#define \N{WASTEBASKET} // expected-error {{macro name must be an identifier}} \ + // expected-warning {{Clang extension}} #define a\u0024 @@ -113,3 +119,20 @@ C 1 #define \u{123456789} // expected-error {{hex escape sequence out of range}} expected-error {{macro name must be an identifier}} #define \u{ // expected-warning {{incomplete delimited universal character name; treating as '\' 'u' '{' identifier}} expected-error {{macro name must be an identifier}} #define \u{fgh} // expected-warning {{incomplete delimited universal character name; treating as '\' 'u' '{' identifier}} expected-error {{macro name must be an identifier}} +#define \N{ // expected-warning {{incomplete delimited universal character name; treating as '\' 'N' '{' identifier}} expected-error {{macro name must be an identifier}} +#define \N{} // expected-warning {{empty delimited universal character name; treating as '\' 'N' '{' '}'}} expected-error {{macro name must be an identifier}} +#define \N{NOTATHING} // expected-error {{'NOTATHING' is not a valid Unicode character name}} \ + // expected-error {{macro name must be an identifier}} +#define \NN // expected-warning {{incomplete universal character name; treating as '\' followed by identifier}} expected-error {{macro name must be an identifier}} +#define \N{GREEK_SMALL-LETTERALPHA} // expected-error {{'GREEK_SMALL-LETTERALPHA' is not a valid Unicode character name}} \ + // expected-note {{characters names in Unicode escape sequences are sensitive to case and whitespaces}} + +#define CONCAT(A, B) A##B +int CONCAT(\N{GREEK, CAPITALLETTERALPHA}); // expected-error{{expected}} \ + // expected-warning {{incomplete delimited universal character name}} + +#ifdef TRIGRAPHS +int \N?? 
= 0; // expected-warning{{named escape sequences are a Clang extension}} \
+       // expected-warning 2{{trigraph converted}}
+
+#endif
diff --git a/clang/test/Sema/array-bounds-ptr-arith.c b/clang/test/Sema/array-bounds-ptr-arith.c
index fd2a00e9e8a4bf6c01c065683297a6e1bee0d806..6ae705a21864142cd6122503f40778086729fe00 100644
--- a/clang/test/Sema/array-bounds-ptr-arith.c
+++ b/clang/test/Sema/array-bounds-ptr-arith.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -verify -Warray-bounds-pointer-arithmetic %s
+// RUN: %clang_cc1 -verify -Warray-bounds-pointer-arithmetic -fstrict-flex-arrays=1 %s
 
 // Test case from PR10615
 struct ext2_super_block{
@@ -12,40 +12,3 @@ void* broken (struct ext2_super_block *es,int a)
 {
 	return (void *)es->s_uuid + 80; // expected-warning {{refers past the end of the array}}
 }
-
-// Test case reduced from PR11594
-struct S { int n; };
-void pr11594(struct S *s) {
-  int a[10];
-  int *p = a - s->n;
-}
-
-// Test case reduced from <rdar://problem/11387038>. This resulted in
-// an assertion failure because of the typedef instead of an explicit
-// constant array type.
-struct RDar11387038 {};
-typedef struct RDar11387038 RDar11387038Array[1];
-struct RDar11387038_Table {
-  RDar11387038Array z;
-};
-typedef struct RDar11387038_Table * TPtr;
-typedef TPtr *TabHandle;
-struct RDar11387038_B { TabHandle x; };
-typedef struct RDar11387038_B RDar11387038_B;
-
-void radar11387038(void) {
-  RDar11387038_B *pRDar11387038_B;
-  struct RDar11387038* y = &(*pRDar11387038_B->x)->z[4];
-}
-
-void pr51682 (void) {
-  int arr [1];
-  switch (0) {
-  case 0:
-    break;
-  case 1:
-    asm goto (""::"r"(arr[42] >> 1)::failed); // no-warning
-    break;
-  }
-failed:;
-}
diff --git a/clang/test/Sema/ucn-identifiers.c b/clang/test/Sema/ucn-identifiers.c
index 38106767f9ab5696b0bc0704949e343bd8c768e1..fb7181f0beca2ea9a8d90c584159e2ea985572eb 100644
--- a/clang/test/Sema/ucn-identifiers.c
+++ b/clang/test/Sema/ucn-identifiers.c
@@ -18,6 +18,7 @@ void goodCalls(void) {
   über(2);
   \U000000FCber(3);
   \u{FC}ber(4); // expected-warning {{Clang extension}}
+  \N{LATIN SMALL LETTER U WITH DIAERESIS}ber(4); // expected-warning {{Clang extension}}
 }
 
 void badCalls(void) {
diff --git a/clang/test/Sema/warn-memset-bad-sizeof.c b/clang/test/Sema/warn-memset-bad-sizeof.c
new file mode 100644
index 0000000000000000000000000000000000000000..c4768d8d0edd2faa7855d273edb65476d6dbe7c1
--- /dev/null
+++ b/clang/test/Sema/warn-memset-bad-sizeof.c
@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+
+// expected-no-diagnostics
+
+typedef __SIZE_TYPE__ size_t;
+void *memset(void *, int, size_t);
+
+typedef struct {
+  int a;
+} S;
+
+void test() {
+  S s;
+  __auto_type dstptr = &s;
+  memset(dstptr, 0, sizeof(s));
+}
diff --git a/clang/test/SemaCXX/array-bounds-strict-flex-arrays.cpp b/clang/test/SemaCXX/array-bounds-strict-flex-arrays.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dde683fb5390fef7fea293887aff4132bc429247
--- /dev/null
+++ b/clang/test/SemaCXX/array-bounds-strict-flex-arrays.cpp
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -verify -fstrict-flex-arrays=3 %s
+
+// We cannot know for sure the size of a flexible array.
+void test() {
+  struct {
+    int f;
+    int a[];
+  } s2;
+  s2.a[2] = 0; // no-warning
+}
+
+// Under -fstrict-flex-arrays `a` is not a flexible array.
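+// (For reference, the documented strictness levels of -fstrict-flex-arrays:
+// under =1, trailing arrays declared [], [0], or [1] still count as flexible
+// array members; under =2 only [] and [0] do; under =3, used by this test,
+// only a true incomplete array [] does.)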
+void test1() {
+  struct {
+    int f;
+    int a[1]; // expected-note {{declared here}}
+  } s2;
+  s2.a[2] = 0; // expected-warning 1 {{array index 2 is past the end of the array (which contains 1 element)}}
+}
diff --git a/clang/test/SemaCXX/warn-empty-body.cpp b/clang/test/SemaCXX/warn-empty-body.cpp
index a248c4251d5250c7047534ec1e8872b10978a259..08cfb57e90e075e6e5eb7b41b78165baefc1a046 100644
--- a/clang/test/SemaCXX/warn-empty-body.cpp
+++ b/clang/test/SemaCXX/warn-empty-body.cpp
@@ -6,6 +6,8 @@ int c();
 
 #define MACRO_A 0
 
+#define AND(x, y) ((x) && (y))
+
 void test1(int x, int y) {
   while(true) {
     if (x); // expected-warning {{if statement has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
@@ -15,6 +17,15 @@ void test1(int x, int y) {
   if (x == MACRO_A); // expected-warning {{if statement has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
   if (MACRO_A == x); // expected-warning {{if statement has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
 
+  // Check that we handle the case where the condition comes from a macro
+  // expansion over multiple lines.
+  if (AND(b(),
+          c())); // expected-warning {{if statement has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
+
+  while (AND(b(),
+             c())); // expected-warning{{while loop has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
+  a(0);
+
   int i; // PR11329
   for (i = 0; i < x; i++); { // expected-warning{{for loop has empty body}} expected-note{{put the semicolon on a separate line to silence this warning}}
diff --git a/clang/test/SemaOpenCL/fdeclare-opencl-builtins.cl b/clang/test/SemaOpenCL/fdeclare-opencl-builtins.cl
index 737632fdf07b1588d994b4afabacee83734d0769..bf943a400320c3cbd75c3cc92050f9033428f779 100644
--- a/clang/test/SemaOpenCL/fdeclare-opencl-builtins.cl
+++ b/clang/test/SemaOpenCL/fdeclare-opencl-builtins.cl
@@ -171,14 +171,14 @@ void test_atomic_fetch_with_address_space(volatile __generic atomic_float *a_flo
 // extension is disabled. Test this by counting the number of notes about
 // candidate functions.
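+// (With NO_FP64 defined the double overload is never declared, so the call
+// below should produce five candidate notes; with fp64 available, six.)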
 void test_atomic_double_reporting(volatile __generic atomic_int *a) {
-  atomic_init(a);
+  atomic_init(a, a);
   // expected-error@-1{{no matching function for call to 'atomic_init'}}
 #if defined(NO_FP64)
   // Expecting 5 candidates: int, uint, long, ulong, float
-  // expected-note@-4 5 {{candidate function not viable: requires 2 arguments, but 1 was provided}}
+  // expected-note@-4 5 {{candidate function not viable: no known conversion}}
 #else
   // Expecting 6 candidates: int, uint, long, ulong, float, double
-  // expected-note@-7 6 {{candidate function not viable: requires 2 arguments, but 1 was provided}}
+  // expected-note@-7 6 {{candidate function not viable: no known conversion}}
 #endif
 }
 
@@ -198,7 +198,6 @@ void test_atomics_without_scope_device(volatile __generic atomic_int *a_int) {
 
   atomic_exchange_explicit(a_int, d, memory_order_seq_cst);
   // expected-error@-1{{no matching function for call to 'atomic_exchange_explicit'}}
-  // expected-note@-2 + {{candidate function not viable}}
   atomic_exchange_explicit(a_int, d, memory_order_seq_cst, memory_scope_work_group);
 }
 
@@ -272,9 +271,7 @@ kernel void basic_image_readonly(read_only image2d_t image_read_only_image2d) {
   res = read_imageh(image_read_only_image2d, i2);
 #if __OPENCL_C_VERSION__ < CL_VERSION_1_2 && !defined(__OPENCL_CPP_VERSION__)
   // expected-error@-3{{no matching function for call to 'read_imagef'}}
-  // expected-note@-4 + {{candidate function not viable}}
-  // expected-error@-4{{no matching function for call to 'read_imageh'}}
-  // expected-note@-5 + {{candidate function not viable}}
+  // expected-error@-3{{no matching function for call to 'read_imageh'}}
 #endif
 
   res = read_imageh(image_read_only_image2d, sampler, i2);
@@ -304,7 +301,6 @@ kernel void basic_image_writeonly(write_only image1d_buffer_t image_write_only_i
   write_imagef(image3dwo, i4, i, f4);
 #if __OPENCL_C_VERSION__ <= CL_VERSION_1_2 && !defined(__OPENCL_CPP_VERSION__)
   // expected-error@-2{{no matching function for call to 'write_imagef'}}
-  // expected-note@-3 + {{candidate function not viable}}
 #endif
 }
diff --git a/clang/test/SemaTemplate/concepts-PR54629.cpp b/clang/test/SemaTemplate/concepts-PR54629.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb63c2e134ed5741709315f3360ce2c8d943fb89
--- /dev/null
+++ b/clang/test/SemaTemplate/concepts-PR54629.cpp
@@ -0,0 +1,58 @@
+// RUN: %clang_cc1 -std=c++20 -verify %s
+
+template <typename T>
+struct A {
+  void primary();
+};
+
+template <typename T>
+  requires requires(T &t) { requires sizeof(t) > 4; }
+struct A<T> {
+  void specialization1();
+};
+
+template <typename T>
+  requires requires(T &t) { requires sizeof(t) > 8; }
+struct A<T> {
+  void specialization2();
+};
+
+int main() {
+  A<char>().primary();
+  A<char[8]>().specialization1();
+  A<char[16]>(); // expected-error {{ambiguous partial specialization}}
+  // expected-note@10 {{partial specialization matches [with T = char[16]}}
+  // expected-note@16 {{partial specialization matches [with T = char[16]}}
+}
+
+// Check error messages when no overload with constraints matches.
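A note on the ambiguity exercised below: the three `foo` overloads that follow differ only in their requires-clauses, and because the atomic constraints are textually distinct expressions, none subsumes another. A call whose type satisfies several clauses at once (the `char[16]` case) therefore has no "most constrained" candidate and is ambiguous.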
+template <typename T>
+void foo()
+  requires requires(T &t) { requires sizeof(t) < 4; }
+{}
+
+template <typename T>
+void foo()
+  requires requires(T &t) { requires sizeof(t) > 4; }
+{}
+
+template <typename T>
+void foo()
+  requires requires(T &t) { requires sizeof(t) > 8; }
+{}
+
+void test() {
+  foo<char[4]>();
+  // expected-error@-1 {{no matching function for call to 'foo'}}
+  // expected-note@30 {{candidate template ignored: constraints not satisfied}}
+  // expected-note@31 {{because 'sizeof (t) < 4' (4 < 4) evaluated to false}}
+  // expected-note@35 {{candidate template ignored: constraints not satisfied}}
+  // expected-note@36 {{because 'sizeof (t) > 4' (4 > 4) evaluated to false}}
+  // expected-note@40 {{candidate template ignored: constraints not satisfied}}
+  // expected-note@41 {{because 'sizeof (t) > 8' (4 > 8) evaluated to false}}
+
+  foo<char[16]>();
+  // expected-error@-1 {{call to 'foo' is ambiguous}}
+  // expected-note@35 {{candidate function}}
+  // expected-note@40 {{candidate function}}
+}
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index b85cb1220d6badcb3f0e5cae9d6bb293def81066..07110a0db09163b59accce220f92a763bdae3899 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -79,7 +79,18 @@ static cl::opt<std::string> AssumeFileName(
     "assume-filename",
     cl::desc("Override filename used to determine the language.\n"
              "When reading from stdin, clang-format assumes this\n"
-             "filename to determine the language."),
+             "filename to determine the language.\n"
+             "Unrecognized filenames are treated as C++.\n"
+             "supported:\n"
+             "  CSharp: .cs\n"
+             "  Java: .java\n"
+             "  JavaScript: .mjs .js .ts\n"
+             "  Json: .json\n"
+             "  Objective-C: .m .mm\n"
+             "  Proto: .proto .protodevel\n"
+             "  TableGen: .td\n"
+             "  TextProto: .textpb .pb.txt .textproto .asciipb\n"
+             "  Verilog: .sv .svh .v .vh"),
     cl::init(""), cl::cat(ClangFormatCategory));
 
 static cl::opt<bool> Inplace("i",
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index bb5a1963c6bad58590a7df3e63453f04e4ad9891..ffd17b6cc2dd16ede28410ea14467efb0f165d80 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -68,10 +68,6 @@
 static cl::opt<std::string> LinkerUserPath("linker-path", cl::Required,
                                            cl::desc("Path of linker binary"),
                                            cl::cat(ClangLinkerWrapperCategory));
 
-static cl::opt<std::string>
-    TargetFeatures("target-feature", cl::desc("Target features for triple"),
-                   cl::cat(ClangLinkerWrapperCategory));
-
 static cl::opt<std::string> OptLevel("opt-level",
                                      cl::desc("Optimization level for LTO"),
                                      cl::init("O2"),
@@ -325,10 +321,6 @@ Error extractOffloadFiles(MemoryBufferRef Contents,
     return BinaryOrErr.takeError();
   OffloadBinary &Binary = **BinaryOrErr;
 
-  if (Binary.getVersion() != 1)
-    return createStringError(inconvertibleErrorCode(),
-                             "Incompatible device image version");
-
   // Create a new owned binary with a copy of the original memory.
   std::unique_ptr<MemoryBuffer> BufferCopy = MemoryBuffer::getMemBufferCopy(
       Binary.getData().take_front(Binary.getSize()),
@@ -726,16 +718,24 @@ void diagnosticHandler(const DiagnosticInfo &DI) {
   }
 }
 
-// Get the target features passed in from the driver as <triple>=<features>.
-std::vector<std::string> getTargetFeatures(const Triple &TheTriple) {
-  std::vector<std::string> Features;
-  auto TargetAndFeatures = StringRef(TargetFeatures).split('=');
-  if (TargetAndFeatures.first != TheTriple.getTriple())
-    return Features;
+// Get the list of target features from the input file and unify them such that
+// if there are multiple +xxx or -xxx features we only keep the last one.
+std::vector<std::string> getTargetFeatures(ArrayRef<OffloadFile> InputFiles) {
+  SmallVector<StringRef> Features;
+  for (const OffloadFile &File : InputFiles) {
+    for (auto Arg : llvm::split(File.getBinary()->getString("feature"), ","))
+      Features.emplace_back(Arg);
+  }
+
+  // Only add a feature if it hasn't been seen before starting from the end.
+  std::vector<std::string> UnifiedFeatures;
+  DenseSet<StringRef> UsedFeatures;
+  for (StringRef Feature : llvm::reverse(Features)) {
+    if (UsedFeatures.insert(Feature.drop_front()).second)
+      UnifiedFeatures.push_back(Feature.str());
+  }
 
-  for (auto Feature : llvm::split(TargetAndFeatures.second, ','))
-    Features.push_back(Feature.str());
-  return Features;
+  return UnifiedFeatures;
 }
 
 CodeGenOpt::Level getCGOptLevel(unsigned OptLevel) {
@@ -755,6 +755,7 @@ CodeGenOpt::Level getCGOptLevel(unsigned OptLevel) {
 template <typename ModuleHook = function_ref<bool(size_t, const Module &)>>
 std::unique_ptr<lto::LTO> createLTO(
     const Triple &TheTriple, StringRef Arch, bool WholeProgram,
+    const std::vector<std::string> &Features,
     ModuleHook Hook = [](size_t, const Module &) { return true; }) {
   lto::Config Conf;
   lto::ThinBackend Backend;
@@ -765,7 +766,7 @@ std::unique_ptr<lto::LTO> createLTO(
   Conf.CPU = Arch.str();
   Conf.Options = codegen::InitTargetOptionsFromCodeGenFlags(TheTriple);
 
-  Conf.MAttrs = getTargetFeatures(TheTriple);
+  Conf.MAttrs = Features;
   Conf.CGOptLevel = getCGOptLevel(OptLevel[1] - '0');
   Conf.OptLevel = OptLevel[1] - '0';
   if (Conf.OptLevel > 0)
@@ -902,10 +903,12 @@ Error linkBitcodeFiles(SmallVectorImpl<OffloadFile> &InputFiles,
   };
 
   // We assume visibility of the whole program if every input file was bitcode.
+  auto Features = getTargetFeatures(BitcodeInputFiles);
   bool WholeProgram = InputFiles.empty();
   auto LTOBackend =
-      (EmbedBitcode) ? createLTO(TheTriple, Arch, WholeProgram, OutputBitcode)
-                     : createLTO(TheTriple, Arch, WholeProgram);
+      (EmbedBitcode)
+          ? createLTO(TheTriple, Arch, WholeProgram, Features, OutputBitcode)
+          : createLTO(TheTriple, Arch, WholeProgram, Features);
 
   // We need to resolve the symbols so the LTO backend knows which symbols need
   // to be kept or can be internalized. This is a simplified symbol resolution
diff --git a/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp b/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
index 7ef258396e9898557f98e973d64f2e8a13ebbd50..338b63ad0a223b491893ce98148ebf4c13c43449 100644
--- a/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
+++ b/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
@@ -22,6 +22,7 @@
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/Signals.h"
+#include "llvm/Support/StringSaver.h"
 #include "llvm/Support/WithColor.h"
 
 using namespace llvm;
@@ -71,9 +72,18 @@ int main(int argc, const char **argv) {
   SmallVector<char> BinaryData;
   raw_svector_ostream OS(BinaryData);
   for (StringRef Image : DeviceImages) {
+    BumpPtrAllocator Alloc;
+    StringSaver Saver(Alloc);
+
     StringMap<StringRef> Args;
-    for (StringRef Arg : llvm::split(Image, ","))
-      Args.insert(Arg.split("="));
+    for (StringRef Arg : llvm::split(Image, ",")) {
+      auto KeyAndValue = Arg.split("=");
+      if (Args.count(KeyAndValue.first))
+        Args[KeyAndValue.first] =
+            Saver.save(Args[KeyAndValue.first] + "," + KeyAndValue.second);
+      else
+        Args[KeyAndValue.first] = KeyAndValue.second;
+    }
 
     if (!Args.count("triple") || !Args.count("file"))
       return reportError(createStringError(
diff --git a/clang/tools/clang-repl/ClangRepl.cpp b/clang/tools/clang-repl/ClangRepl.cpp
index 088615e30a2d6e9dabff993e4e97f38b5f1027d6..4f673bdcb7cc5b55336bec18b8152b5373f10de0 100644
--- a/clang/tools/clang-repl/ClangRepl.cpp
+++ b/clang/tools/clang-repl/ClangRepl.cpp
@@ -48,6 +48,23 @@ static void LLVMErrorHandler(void *UserData, const char *Message,
   exit(GenCrashDiag ? 70 : 1);
 }
 
+// If we are running with -verify, an error reported by the diagnostic verifier
+// has to be turned into an unsuccessful exit code. This is relevant especially
+// for the test suite.
+static int checkDiagErrors(const clang::CompilerInstance *CI) {
+  unsigned Errs = CI->getDiagnostics().getClient()->getNumErrors();
+  if (CI->getDiagnosticOpts().VerifyDiagnostics) {
+    // If there was an error that came from the verifier we must return 1 as
+    // an exit code for the process. This will make the test fail as expected.
+    clang::DiagnosticConsumer *Client = CI->getDiagnostics().getClient();
+    Client->EndSourceFile();
+    Errs = Client->getNumErrors();
+
+    // The interpreter expects BeginSourceFile/EndSourceFiles to be balanced.
+    Client->BeginSourceFile(CI->getLangOpts(), &CI->getPreprocessor());
+  }
+  return Errs ?
EXIT_FAILURE : EXIT_SUCCESS; +} + llvm::ExitOnError ExitOnErr; int main(int argc, const char **argv) { ExitOnErr.setBanner("clang-repl: "); @@ -92,8 +109,14 @@ int main(int argc, const char **argv) { llvm::LineEditor LE("clang-repl"); // FIXME: Add LE.setListCompleter while (llvm::Optional Line = LE.readLine()) { - if (*Line == "quit") + if (*Line == R"(%quit)") break; + if (*Line == R"(%undo)") { + if (auto Err = Interp->Undo()) + llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: "); + continue; + } + if (auto Err = Interp->ParseAndExecute(*Line)) llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: "); } @@ -106,5 +129,5 @@ int main(int argc, const char **argv) { llvm::llvm_shutdown(); - return 0; + return checkDiagErrors(Interp->getCompilerInstance()); } diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp index fa1f09b44f4da66bc99ff62205c190d7d152a2fc..0e21106535ec88e532ae65a264ffa58450217ecc 100644 --- a/clang/tools/driver/driver.cpp +++ b/clang/tools/driver/driver.cpp @@ -406,7 +406,7 @@ int clang_main(int Argc, char **Argv) { if (ClangCLMode) { // Arguments in "CL" are prepended. llvm::Optional OptCL = llvm::sys::Process::GetEnv("CL"); - if (OptCL.hasValue()) { + if (OptCL) { SmallVector PrependedOpts; getCLEnvVarOptions(OptCL.getValue(), Saver, PrependedOpts); @@ -415,7 +415,7 @@ int clang_main(int Argc, char **Argv) { } // Arguments in "_CL_" are appended. llvm::Optional Opt_CL_ = llvm::sys::Process::GetEnv("_CL_"); - if (Opt_CL_.hasValue()) { + if (Opt_CL_) { SmallVector AppendedOpts; getCLEnvVarOptions(Opt_CL_.getValue(), Saver, AppendedOpts); diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp index 8e0ce6fa844999ebf21d1dfb9932828a847c35d1..75052d6253f667d6a5af2bde47408f0a536c3df8 100644 --- a/clang/tools/libclang/CIndex.cpp +++ b/clang/tools/libclang/CIndex.cpp @@ -536,7 +536,7 @@ bool CursorVisitor::VisitChildren(CXCursor Cursor) { TLEnd = CXXUnit->top_level_end(); TL != TLEnd; ++TL) { const Optional V = handleDeclForVisitation(*TL); - if (!V.hasValue()) + if (!V) continue; return V.getValue(); } @@ -641,7 +641,7 @@ bool CursorVisitor::VisitDeclContext(DeclContext *DC) { if (OMD->isSynthesizedAccessorStub()) continue; const Optional V = handleDeclForVisitation(D); - if (!V.hasValue()) + if (!V) continue; return V.getValue(); } @@ -675,7 +675,7 @@ Optional CursorVisitor::handleDeclForVisitation(const Decl *D) { } const Optional V = shouldVisitCursor(Cursor); - if (!V.hasValue()) + if (!V) return None; if (!V.getValue()) return false; @@ -1074,7 +1074,7 @@ bool CursorVisitor::VisitObjCContainerDecl(ObjCContainerDecl *D) { I != E; ++I) { CXCursor Cursor = MakeCXCursor(*I, TU, RegionOfInterest); const Optional &V = shouldVisitCursor(Cursor); - if (!V.hasValue()) + if (!V) continue; if (!V.getValue()) return false; @@ -2178,6 +2178,7 @@ public: void VisitOMPTaskLoopDirective(const OMPTaskLoopDirective *D); void VisitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective *D); void VisitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective *D); + void VisitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective *D); void VisitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective *D); void VisitOMPParallelMasterTaskLoopDirective( @@ -3184,6 +3185,11 @@ void EnqueueVisitor::VisitOMPMasterTaskLoopDirective( VisitOMPLoopDirective(D); } +void EnqueueVisitor::VisitOMPMaskedTaskLoopDirective( + const OMPMaskedTaskLoopDirective *D) { + VisitOMPLoopDirective(D); +} + void 
EnqueueVisitor::VisitOMPMasterTaskLoopSimdDirective( const OMPMasterTaskLoopSimdDirective *D) { VisitOMPLoopDirective(D); @@ -5822,6 +5828,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) { return cxstring::createRef("OMPTaskLoopSimdDirective"); case CXCursor_OMPMasterTaskLoopDirective: return cxstring::createRef("OMPMasterTaskLoopDirective"); + case CXCursor_OMPMaskedTaskLoopDirective: + return cxstring::createRef("OMPMaskedTaskLoopDirective"); case CXCursor_OMPMasterTaskLoopSimdDirective: return cxstring::createRef("OMPMasterTaskLoopSimdDirective"); case CXCursor_OMPParallelMasterTaskLoopDirective: @@ -8170,13 +8178,13 @@ static CXVersion convertVersion(VersionTuple In) { Out.Major = In.getMajor(); Optional Minor = In.getMinor(); - if (Minor.hasValue()) + if (Minor) Out.Minor = *Minor; else return Out; Optional Subminor = In.getSubminor(); - if (Subminor.hasValue()) + if (Subminor) Out.Subminor = *Subminor; return Out; diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp index afaa594ef17e069d98f41486bd9d98e78baecf68..f7f9ec6c9cc8a9229998c4e4dc249915d810d7d2 100644 --- a/clang/tools/libclang/CXCursor.cpp +++ b/clang/tools/libclang/CXCursor.cpp @@ -769,6 +769,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent, case Stmt::OMPMasterTaskLoopDirectiveClass: K = CXCursor_OMPMasterTaskLoopDirective; break; + case Stmt::OMPMaskedTaskLoopDirectiveClass: + K = CXCursor_OMPMaskedTaskLoopDirective; + break; case Stmt::OMPMasterTaskLoopSimdDirectiveClass: K = CXCursor_OMPMasterTaskLoopSimdDirective; break; diff --git a/clang/tools/scan-build/man/scan-build.1 b/clang/tools/scan-build/man/scan-build.1 index 5aa29f9d5b20b73ca7fa14ea1cedc04e606f913d..d213298706fea8d3a0e7f43aac15fbd3fd5c5998 100644 --- a/clang/tools/scan-build/man/scan-build.1 +++ b/clang/tools/scan-build/man/scan-build.1 @@ -2,9 +2,9 @@ .\" See https://llvm.org/LICENSE.txt for license information. 
.\" SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception .\" $Id$ -.Dd Sep 29, 2021 +.Dd Jun 24, 2022 .Dt SCAN-BUILD 1 -.Os "clang" "14" +.Os "clang" "15" .Sh NAME .Nm scan-build .Nd Clang static analyzer diff --git a/clang/unittests/ASTMatchers/ASTMatchersInternalTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersInternalTest.cpp index 6573461c9fc1e1e3ac869f39637c7c2ce32b57db..eb9071bd9cb6d45b4eb893f59f6db903f52bdf01 100644 --- a/clang/unittests/ASTMatchers/ASTMatchersInternalTest.cpp +++ b/clang/unittests/ASTMatchers/ASTMatchersInternalTest.cpp @@ -277,7 +277,7 @@ TEST(Matcher, matchOverEntireASTContext) { TEST(DynTypedMatcherTest, TraversalKindForwardsToImpl) { auto M = DynTypedMatcher(decl()); - EXPECT_FALSE(M.getTraversalKind().hasValue()); + EXPECT_FALSE(M.getTraversalKind()); M = DynTypedMatcher(traverse(TK_AsIs, decl())); EXPECT_THAT(M.getTraversalKind(), llvm::ValueIs(TK_AsIs)); diff --git a/clang/unittests/ASTMatchers/Dynamic/ParserTest.cpp b/clang/unittests/ASTMatchers/Dynamic/ParserTest.cpp index 255432dc3862a82a43880f48395623ccc36fe0e3..eaba6b762c026af2abc34760e4043de257c4077a 100644 --- a/clang/unittests/ASTMatchers/Dynamic/ParserTest.cpp +++ b/clang/unittests/ASTMatchers/Dynamic/ParserTest.cpp @@ -149,7 +149,7 @@ bool matchesRange(SourceRange Range, unsigned StartLine, llvm::Optional getSingleMatcher(const VariantValue &Value) { llvm::Optional Result = Value.getMatcher().getSingleMatcher(); - EXPECT_TRUE(Result.hasValue()); + EXPECT_TRUE(Result); return Result; } @@ -280,7 +280,7 @@ TEST(ParserTest, FullParserTest) { EXPECT_TRUE(matches("unsigned aaaccbb;", M)); Code = "hasInitializer(\n binaryOperator(hasLHS(\"A\")))"; - EXPECT_TRUE(!Parser::parseMatcherExpression(Code, &Error).hasValue()); + EXPECT_TRUE(!Parser::parseMatcherExpression(Code, &Error)); EXPECT_EQ("1:1: Error parsing argument 1 for matcher hasInitializer.\n" "2:5: Error parsing argument 1 for matcher binaryOperator.\n" "2:20: Error building matcher hasLHS.\n" @@ -421,7 +421,7 @@ TEST(ParserTest, ParseMultiline) { ) )matcher"; Diagnostics Error; - EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error).hasValue()); + EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error)); } { @@ -432,7 +432,7 @@ TEST(ParserTest, ParseMultiline) { ) )matcher"; Diagnostics Error; - EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error).hasValue()); + EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error)); } { @@ -440,7 +440,7 @@ TEST(ParserTest, ParseMultiline) { "paramName") )matcher"; Diagnostics Error; - EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error).hasValue()); + EXPECT_TRUE(Parser::parseMatcherExpression(Code, &Error)); } { @@ -481,7 +481,7 @@ decl()))matcher"; ("paramName") )matcher"; M = Parser::parseMatcherExpression(Code, nullptr, &NamedValues, &Error); - EXPECT_FALSE(M.hasValue()); + EXPECT_FALSE(M); EXPECT_EQ("1:15: Malformed bind() expression.", Error.toStringFull()); } @@ -494,7 +494,7 @@ decl()))matcher"; bind("paramName") )matcher"; M = Parser::parseMatcherExpression(Code, nullptr, &NamedValues, &Error); - EXPECT_FALSE(M.hasValue()); + EXPECT_FALSE(M); EXPECT_EQ("1:11: Period not followed by valid chained call.", Error.toStringFull()); } @@ -506,7 +506,7 @@ decl()))matcher"; () )matcher"; M = Parser::parseMatcherExpression(Code, nullptr, nullptr, &Error); - EXPECT_FALSE(M.hasValue()); + EXPECT_FALSE(M); EXPECT_EQ("1:8: Error parsing matcher. 
Found token " " while looking for '('.", Error.toStringFull()); @@ -521,7 +521,7 @@ decl()))matcher"; ) )matcher"; M = Parser::parseMatcherExpression(Code, nullptr, nullptr, &Error); - EXPECT_FALSE(M.hasValue()); + EXPECT_FALSE(M); StringRef Expected = R"error(1:1: Error parsing argument 1 for matcher varDecl. 2:3: Matcher not found: doesNotExist)error"; EXPECT_EQ(Expected, Error.toStringFull()); diff --git a/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp b/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp index b8a9ef52504aff19a8be325a57eeac2ccd1ed967..26bc37bda617be2d0ba39ad34deaac543962a158 100644 --- a/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp @@ -33,63 +33,108 @@ TEST_F(DataflowAnalysisContextTest, } TEST_F(DataflowAnalysisContextTest, - GetOrCreateConjunctionValueReturnsSameExprGivenSameArgs) { + GetOrCreateConjunctionReturnsSameExprGivenSameArgs) { auto &X = Context.createAtomicBoolValue(); - auto &XAndX = Context.getOrCreateConjunctionValue(X, X); + auto &XAndX = Context.getOrCreateConjunction(X, X); EXPECT_EQ(&XAndX, &X); } TEST_F(DataflowAnalysisContextTest, - GetOrCreateConjunctionValueReturnsSameExprOnSubsequentCalls) { + GetOrCreateConjunctionReturnsSameExprOnSubsequentCalls) { auto &X = Context.createAtomicBoolValue(); auto &Y = Context.createAtomicBoolValue(); - auto &XAndY1 = Context.getOrCreateConjunctionValue(X, Y); - auto &XAndY2 = Context.getOrCreateConjunctionValue(X, Y); + auto &XAndY1 = Context.getOrCreateConjunction(X, Y); + auto &XAndY2 = Context.getOrCreateConjunction(X, Y); EXPECT_EQ(&XAndY1, &XAndY2); - auto &YAndX = Context.getOrCreateConjunctionValue(Y, X); + auto &YAndX = Context.getOrCreateConjunction(Y, X); EXPECT_EQ(&XAndY1, &YAndX); auto &Z = Context.createAtomicBoolValue(); - auto &XAndZ = Context.getOrCreateConjunctionValue(X, Z); + auto &XAndZ = Context.getOrCreateConjunction(X, Z); EXPECT_NE(&XAndY1, &XAndZ); } TEST_F(DataflowAnalysisContextTest, - GetOrCreateDisjunctionValueReturnsSameExprGivenSameArgs) { + GetOrCreateDisjunctionReturnsSameExprGivenSameArgs) { auto &X = Context.createAtomicBoolValue(); - auto &XOrX = Context.getOrCreateDisjunctionValue(X, X); + auto &XOrX = Context.getOrCreateDisjunction(X, X); EXPECT_EQ(&XOrX, &X); } TEST_F(DataflowAnalysisContextTest, - GetOrCreateDisjunctionValueReturnsSameExprOnSubsequentCalls) { + GetOrCreateDisjunctionReturnsSameExprOnSubsequentCalls) { auto &X = Context.createAtomicBoolValue(); auto &Y = Context.createAtomicBoolValue(); - auto &XOrY1 = Context.getOrCreateDisjunctionValue(X, Y); - auto &XOrY2 = Context.getOrCreateDisjunctionValue(X, Y); + auto &XOrY1 = Context.getOrCreateDisjunction(X, Y); + auto &XOrY2 = Context.getOrCreateDisjunction(X, Y); EXPECT_EQ(&XOrY1, &XOrY2); - auto &YOrX = Context.getOrCreateDisjunctionValue(Y, X); + auto &YOrX = Context.getOrCreateDisjunction(Y, X); EXPECT_EQ(&XOrY1, &YOrX); auto &Z = Context.createAtomicBoolValue(); - auto &XOrZ = Context.getOrCreateDisjunctionValue(X, Z); + auto &XOrZ = Context.getOrCreateDisjunction(X, Z); EXPECT_NE(&XOrY1, &XOrZ); } TEST_F(DataflowAnalysisContextTest, - GetOrCreateNegationValueReturnsSameExprOnSubsequentCalls) { + GetOrCreateNegationReturnsSameExprOnSubsequentCalls) { auto &X = Context.createAtomicBoolValue(); - auto &NotX1 = Context.getOrCreateNegationValue(X); - auto &NotX2 = Context.getOrCreateNegationValue(X); + auto &NotX1 = Context.getOrCreateNegation(X); + auto &NotX2 = 
Context.getOrCreateNegation(X); EXPECT_EQ(&NotX1, &NotX2); auto &Y = Context.createAtomicBoolValue(); - auto &NotY = Context.getOrCreateNegationValue(Y); + auto &NotY = Context.getOrCreateNegation(Y); EXPECT_NE(&NotX1, &NotY); } +TEST_F(DataflowAnalysisContextTest, + GetOrCreateImplicationReturnsTrueGivenSameArgs) { + auto &X = Context.createAtomicBoolValue(); + auto &XImpliesX = Context.getOrCreateImplication(X, X); + EXPECT_EQ(&XImpliesX, &Context.getBoolLiteralValue(true)); +} + +TEST_F(DataflowAnalysisContextTest, + GetOrCreateImplicationReturnsSameExprOnSubsequentCalls) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &XImpliesY1 = Context.getOrCreateImplication(X, Y); + auto &XImpliesY2 = Context.getOrCreateImplication(X, Y); + EXPECT_EQ(&XImpliesY1, &XImpliesY2); + + auto &YImpliesX = Context.getOrCreateImplication(Y, X); + EXPECT_NE(&XImpliesY1, &YImpliesX); + + auto &Z = Context.createAtomicBoolValue(); + auto &XImpliesZ = Context.getOrCreateImplication(X, Z); + EXPECT_NE(&XImpliesY1, &XImpliesZ); +} + +TEST_F(DataflowAnalysisContextTest, GetOrCreateIffReturnsTrueGivenSameArgs) { + auto &X = Context.createAtomicBoolValue(); + auto &XIffX = Context.getOrCreateIff(X, X); + EXPECT_EQ(&XIffX, &Context.getBoolLiteralValue(true)); +} + +TEST_F(DataflowAnalysisContextTest, + GetOrCreateIffReturnsSameExprOnSubsequentCalls) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &XIffY1 = Context.getOrCreateIff(X, Y); + auto &XIffY2 = Context.getOrCreateIff(X, Y); + EXPECT_EQ(&XIffY1, &XIffY2); + + auto &YIffX = Context.getOrCreateIff(Y, X); + EXPECT_EQ(&XIffY1, &YIffX); + + auto &Z = Context.createAtomicBoolValue(); + auto &XIffZ = Context.getOrCreateIff(X, Z); + EXPECT_NE(&XIffY1, &XIffZ); +} + TEST_F(DataflowAnalysisContextTest, EmptyFlowCondition) { auto &FC = Context.makeFlowConditionToken(); auto &C = Context.createAtomicBoolValue(); @@ -164,9 +209,239 @@ TEST_F(DataflowAnalysisContextTest, FlowConditionTautologies) { // ... but we can prove A || !A is true. 
auto &FC5 = Context.makeFlowConditionToken(); Context.addFlowConditionConstraint( - FC5, Context.getOrCreateDisjunctionValue( - C1, Context.getOrCreateNegationValue(C1))); + FC5, Context.getOrCreateDisjunction(C1, Context.getOrCreateNegation(C1))); EXPECT_TRUE(Context.flowConditionIsTautology(FC5)); } +TEST_F(DataflowAnalysisContextTest, EquivBoolVals) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &Z = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // X == X + EXPECT_TRUE(Context.equivalentBoolValues(X, X)); + // X != Y + EXPECT_FALSE(Context.equivalentBoolValues(X, Y)); + + // !X != X + EXPECT_FALSE(Context.equivalentBoolValues(Context.getOrCreateNegation(X), X)); + // !(!X) = X + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateNegation(Context.getOrCreateNegation(X)), X)); + + // (X || X) == X + EXPECT_TRUE( + Context.equivalentBoolValues(Context.getOrCreateDisjunction(X, X), X)); + // (X || Y) != X + EXPECT_FALSE( + Context.equivalentBoolValues(Context.getOrCreateDisjunction(X, Y), X)); + // (X || True) == True + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateDisjunction(X, True), True)); + // (X || False) == X + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateDisjunction(X, False), X)); + + // (X && X) == X + EXPECT_TRUE( + Context.equivalentBoolValues(Context.getOrCreateConjunction(X, X), X)); + // (X && Y) != X + EXPECT_FALSE( + Context.equivalentBoolValues(Context.getOrCreateConjunction(X, Y), X)); + // (X && True) == X + EXPECT_TRUE( + Context.equivalentBoolValues(Context.getOrCreateConjunction(X, True), X)); + // (X && False) == False + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateConjunction(X, False), False)); + + // (X || Y) == (Y || X) + EXPECT_TRUE( + Context.equivalentBoolValues(Context.getOrCreateDisjunction(X, Y), + Context.getOrCreateDisjunction(Y, X))); + // (X && Y) == (Y && X) + EXPECT_TRUE( + Context.equivalentBoolValues(Context.getOrCreateConjunction(X, Y), + Context.getOrCreateConjunction(Y, X))); + + // ((X || Y) || Z) == (X || (Y || Z)) + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateDisjunction(Context.getOrCreateDisjunction(X, Y), Z), + Context.getOrCreateDisjunction(X, Context.getOrCreateDisjunction(Y, Z)))); + // ((X && Y) && Z) == (X && (Y && Z)) + EXPECT_TRUE(Context.equivalentBoolValues( + Context.getOrCreateConjunction(Context.getOrCreateConjunction(X, Y), Z), + Context.getOrCreateConjunction(X, Context.getOrCreateConjunction(Y, Z)))); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsAtomicFC) { + auto &X = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // FC = X + auto &FC = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC, X); + + // If X is true in FC, FC = X must be true + auto &FCWithXTrue = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(FCWithXTrue, True)); + + // If X is false in FC, FC = X must be false + auto &FC1WithXFalse = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(FC1WithXFalse, False)); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsNegatedFC) { + auto &X = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False 
= Context.getBoolLiteralValue(false); + + // FC = !X + auto &FC = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC, Context.getOrCreateNegation(X)); + + // If X is true in FC, FC = !X must be false + auto &FCWithXTrue = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(FCWithXTrue, False)); + + // If X is false in FC, FC = !X must be true + auto &FC1WithXFalse = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(FC1WithXFalse, True)); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsDisjunctiveFC) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // FC = X || Y + auto &FC = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC, Context.getOrCreateDisjunction(X, Y)); + + // If X is true in FC, FC = X || Y must be true + auto &FCWithXTrue = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(FCWithXTrue, True)); + + // If X is false in FC, FC = X || Y is equivalent to evaluating Y + auto &FC1WithXFalse = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(FC1WithXFalse, Y)); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsConjunctiveFC) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // FC = X && Y + auto &FC = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC, Context.getOrCreateConjunction(X, Y)); + + // If X is true in FC, FC = X && Y is equivalent to evaluating Y + auto &FCWithXTrue = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(FCWithXTrue, Y)); + + // If X is false in FC, FC = X && Y must be false + auto &FCWithXFalse = + Context.buildAndSubstituteFlowCondition(FC, {{&X, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(FCWithXFalse, False)); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsForkedFC) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &Z = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // FC = X && Y + auto &FC = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC, Context.getOrCreateConjunction(X, Y)); + // ForkedFC = FC && Z = X && Y && Z + auto &ForkedFC = Context.forkFlowCondition(FC); + Context.addFlowConditionConstraint(ForkedFC, Z); + + // If any of X,Y,Z is true in ForkedFC, ForkedFC = X && Y && Z is equivalent + // to evaluating the conjunction of the remaining values + auto &ForkedFCWithZTrue = + Context.buildAndSubstituteFlowCondition(ForkedFC, {{&Z, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues( + ForkedFCWithZTrue, Context.getOrCreateConjunction(X, Y))); + auto &ForkedFCWithYAndZTrue = Context.buildAndSubstituteFlowCondition( + ForkedFC, {{&Y, &True}, {&Z, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(ForkedFCWithYAndZTrue, X)); + + // If any of X,Y,Z is false in ForkedFC, ForkedFC = X && Y && Z must be false + auto &ForkedFCWithXFalse = + Context.buildAndSubstituteFlowCondition(ForkedFC, {{&X, 
&False}}); + auto &ForkedFCWithYFalse = + Context.buildAndSubstituteFlowCondition(ForkedFC, {{&Y, &False}}); + auto &ForkedFCWithZFalse = + Context.buildAndSubstituteFlowCondition(ForkedFC, {{&Z, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(ForkedFCWithXFalse, False)); + EXPECT_TRUE(Context.equivalentBoolValues(ForkedFCWithYFalse, False)); + EXPECT_TRUE(Context.equivalentBoolValues(ForkedFCWithZFalse, False)); +} + +TEST_F(DataflowAnalysisContextTest, SubstituteFlowConditionsJoinedFC) { + auto &X = Context.createAtomicBoolValue(); + auto &Y = Context.createAtomicBoolValue(); + auto &Z = Context.createAtomicBoolValue(); + auto &True = Context.getBoolLiteralValue(true); + auto &False = Context.getBoolLiteralValue(false); + + // FC1 = X + auto &FC1 = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC1, X); + // FC2 = Y + auto &FC2 = Context.makeFlowConditionToken(); + Context.addFlowConditionConstraint(FC2, Y); + // JoinedFC = (FC1 || FC2) && Z = (X || Y) && Z + auto &JoinedFC = Context.joinFlowConditions(FC1, FC2); + Context.addFlowConditionConstraint(JoinedFC, Z); + + // If any of X, Y is true in JoinedFC, JoinedFC = (X || Y) && Z is equivalent + // to evaluating the remaining Z + auto &JoinedFCWithXTrue = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&X, &True}}); + auto &JoinedFCWithYTrue = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&Y, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues(JoinedFCWithXTrue, Z)); + EXPECT_TRUE(Context.equivalentBoolValues(JoinedFCWithYTrue, Z)); + + // If Z is true in JoinedFC, JoinedFC = (X || Y) && Z is equivalent to + // evaluating the remaining disjunction (X || Y) + auto &JoinedFCWithZTrue = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&Z, &True}}); + EXPECT_TRUE(Context.equivalentBoolValues( + JoinedFCWithZTrue, Context.getOrCreateDisjunction(X, Y))); + + // If any of X, Y is false in JoinedFC, JoinedFC = (X || Y) && Z is equivalent + // to evaluating the conjunction of the other value and Z + auto &JoinedFCWithXFalse = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&X, &False}}); + auto &JoinedFCWithYFalse = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&Y, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues( + JoinedFCWithXFalse, Context.getOrCreateConjunction(Y, Z))); + EXPECT_TRUE(Context.equivalentBoolValues( + JoinedFCWithYFalse, Context.getOrCreateConjunction(X, Z))); + + // If Z is false in JoinedFC, JoinedFC = (X || Y) && Z must be false + auto &JoinedFCWithZFalse = + Context.buildAndSubstituteFlowCondition(JoinedFC, {{&Z, &False}}); + EXPECT_TRUE(Context.equivalentBoolValues(JoinedFCWithZFalse, False)); +} + } // namespace diff --git a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp index 51b40f231987882a1724e7ccd7437ba8b6287bdd..434bc6b8042ee6c017913bc1ac52955e52d83d7c 100644 --- a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp @@ -33,18 +33,6 @@ protected: Environment Env; }; -TEST_F(EnvironmentTest, MakeImplicationReturnsTrueGivenSameArgs) { - auto &X = Env.makeAtomicBoolValue(); - auto &XEqX = Env.makeImplication(X, X); - EXPECT_EQ(&XEqX, &Env.getBoolLiteralValue(true)); -} - -TEST_F(EnvironmentTest, MakeIffReturnsTrueGivenSameArgs) { - auto &X = Env.makeAtomicBoolValue(); - auto &XEqX = Env.makeIff(X, X); - EXPECT_EQ(&XEqX, &Env.getBoolLiteralValue(true)); -} - 
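The two `EnvironmentTest` cases deleted above are subsumed by the `DataflowAnalysisContextTest` cases added earlier in this patch. As a sanity check on the algebra those tests assert, here is a minimal, self-contained model over plain `bool`s (illustrative only; the real `DataflowAnalysisContext` interns `BoolValue` nodes rather than evaluating truth values):

```cpp
#include <cassert>

// X => Y is the usual rewrite !X || Y, and X <=> Y is (X => Y) && (Y => X),
// which is why the tests expect X => X and X <=> X to be literally `true`.
static bool implies(bool X, bool Y) { return !X || Y; }
static bool iff(bool X, bool Y) { return implies(X, Y) && implies(Y, X); }

int main() {
  for (bool X : {false, true}) {
    assert(implies(X, X)); // cf. GetOrCreateImplicationReturnsTrueGivenSameArgs
    assert(iff(X, X));     // cf. GetOrCreateIffReturnsTrueGivenSameArgs
    for (bool Y : {false, true}) {
      assert(iff(X, Y) == iff(Y, X)); // iff is symmetric, like the YIffX check
      assert(implies(X, Y) == (!X || Y));
    }
  }
  return 0;
}
```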
 TEST_F(EnvironmentTest, FlowCondition) {
   EXPECT_TRUE(Env.flowConditionImplies(Env.getBoolLiteralValue(true)));
   EXPECT_FALSE(Env.flowConditionImplies(Env.getBoolLiteralValue(false)));
diff --git a/clang/unittests/Analysis/FlowSensitive/MatchSwitchTest.cpp b/clang/unittests/Analysis/FlowSensitive/MatchSwitchTest.cpp
index bd2ae0e96c01e370acda6079d4b76a011dd30990..98319760fd20683788a16d2f54819d67c6ea169a 100644
--- a/clang/unittests/Analysis/FlowSensitive/MatchSwitchTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/MatchSwitchTest.cpp
@@ -204,3 +204,29 @@ TEST_F(MatchSwitchTest, Neither) {
   RunDataflow(Code,
               UnorderedElementsAre(Pair("p", Holds(BooleanLattice(false)))));
 }
+
+TEST_F(MatchSwitchTest, ReturnNonVoid) {
+  using namespace ast_matchers;
+
+  auto Unit =
+      tooling::buildASTFromCode("void f() { int x = 42; }", "input.cc",
+                                std::make_shared<PCHContainerOperations>());
+  auto &Context = Unit->getASTContext();
+  const auto *S =
+      selectFirst<FunctionDecl>(
+          "f",
+          match(functionDecl(isDefinition(), hasName("f")).bind("f"), Context))
+          ->getBody();
+
+  MatchSwitch<const int, std::vector<int>> Switch =
+      MatchSwitchBuilder<const int, std::vector<int>>()
+          .CaseOf<Stmt>(stmt(),
+                        [](const Stmt *, const MatchFinder::MatchResult &,
+                           const int &State) -> std::vector<int> {
+                          return {1, State, 3};
+                        })
+          .Build();
+  std::vector<int> Actual = Switch(*S, Context, 7);
+  std::vector<int> Expected{1, 7, 3};
+  EXPECT_EQ(Actual, Expected);
+}
diff --git a/clang/unittests/Analysis/FlowSensitive/MultiVarConstantPropagationTest.cpp b/clang/unittests/Analysis/FlowSensitive/MultiVarConstantPropagationTest.cpp
index 797b2849acedfda0a7978450ae8bbe67d5d1d076..05cd67e52fd48e4ca629e5fd19feab1374124aae 100644
--- a/clang/unittests/Analysis/FlowSensitive/MultiVarConstantPropagationTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/MultiVarConstantPropagationTest.cpp
@@ -94,7 +94,7 @@ struct ValueLattice {
 };

 std::ostream &operator<<(std::ostream &OS, const ValueLattice &L) {
-  if (L.Value.hasValue())
+  if (L.Value)
     return OS << *L.Value;
   switch (L.State) {
   case ValueLattice::ValueState::Undefined:
@@ -194,9 +194,7 @@ MATCHER_P(Var, name,
   return arg->getName() == name;
 }

-MATCHER_P(HasConstantVal, v, "") {
-  return arg.Value.hasValue() && *arg.Value == v;
-}
+MATCHER_P(HasConstantVal, v, "") { return arg.Value && *arg.Value == v; }

 MATCHER(Varies, "") { return arg == arg.top(); }

diff --git a/clang/unittests/Analysis/FlowSensitive/SingleVarConstantPropagationTest.cpp b/clang/unittests/Analysis/FlowSensitive/SingleVarConstantPropagationTest.cpp
index 842e3502efaaf1b46bbe807990222a0c6d9667eb..6561385cfd84ae6da21d04a4c0fb40759a378758 100644
--- a/clang/unittests/Analysis/FlowSensitive/SingleVarConstantPropagationTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/SingleVarConstantPropagationTest.cpp
@@ -171,9 +171,7 @@ public:
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;

-MATCHER_P(HasConstantVal, v, "") {
-  return arg.Data.hasValue() && arg.Data->Value == v;
-}
+MATCHER_P(HasConstantVal, v, "") { return arg.Data && arg.Data->Value == v; }

 MATCHER(IsUnknown, "") { return arg == arg.bottom(); }
 MATCHER(Varies, "") { return arg == arg.top(); }
diff --git a/clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp b/clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp
index 4c5efa750404826f609cb67be4165f5c4648153c..7c50453abe509f2f3f02f764044d6888312f6f8d 100644
--- a/clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp
@@ -40,9 +40,8 @@ isAnnotationDirectlyAfterStatement(const Stmt *Stmt, unsigned AnnotationBegin,
   auto NextToken =
       Lexer::findNextToken(Stmt->getEndLoc(), SourceManager, LangOptions);
-  while (NextToken.hasValue() &&
-         SourceManager.getFileOffset(NextToken->getLocation()) <
-             AnnotationBegin) {
+  while (NextToken && SourceManager.getFileOffset(NextToken->getLocation()) <
+                          AnnotationBegin) {
     if (NextToken->isNot(tok::semi))
       return false;
diff --git a/clang/unittests/Analysis/FlowSensitive/TestingSupport.h b/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
index 957d73fd6d0c4cd9be5e41e42bc35c95b38c2325..ce439f5613028dce3beb50b637786c3895a1b8ee 100644
--- a/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
+++ b/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
@@ -131,7 +131,7 @@ llvm::Error checkDataflow(
   std::vector<std::pair<std::string, StateT>> Results;
   for (const CFGBlock *Block : CFCtx->getCFG()) {
     // Skip blocks that were not evaluated.
-    if (!BlockStates[Block->getBlockID()].hasValue())
+    if (!BlockStates[Block->getBlockID()])
       continue;

     transferBlock(
diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
index 3a8e2ac8588a584cad7e6098d202a3338c5fcf5b..a0b753ff9e317efe52127f4870c2b75942c0c420 100644
--- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
@@ -2214,6 +2214,93 @@ TEST_F(TransferTest, IntegralToBooleanCastFromBool) {
   });
 }

+TEST_F(TransferTest, NullToPointerCast) {
+  std::string Code = R"(
+    struct Baz {};
+    void target() {
+      int *FooX = nullptr;
+      int *FooY = nullptr;
+      bool **Bar = nullptr;
+      Baz *Baz = nullptr;
+      // [[p]]
+    }
+  )";
+  runDataflow(Code,
+              [](llvm::ArrayRef<
+                     std::pair<std::string, DataflowAnalysisState<NoopLattice>>>
+                     Results,
+                 ASTContext &ASTCtx) {
+                ASSERT_THAT(Results, ElementsAre(Pair("p", _)));
+                const Environment &Env = Results[0].second.Env;
+
+                const ValueDecl *FooXDecl = findValueDecl(ASTCtx, "FooX");
+                ASSERT_THAT(FooXDecl, NotNull());
+
+                const ValueDecl *FooYDecl = findValueDecl(ASTCtx, "FooY");
+                ASSERT_THAT(FooYDecl, NotNull());
+
+                const ValueDecl *BarDecl = findValueDecl(ASTCtx, "Bar");
+                ASSERT_THAT(BarDecl, NotNull());
+
+                const ValueDecl *BazDecl = findValueDecl(ASTCtx, "Baz");
+                ASSERT_THAT(BazDecl, NotNull());
+
+                const auto *FooXVal =
+                    cast<PointerValue>(Env.getValue(*FooXDecl, SkipPast::None));
+                const auto *FooYVal =
+                    cast<PointerValue>(Env.getValue(*FooYDecl, SkipPast::None));
+                const auto *BarVal =
+                    cast<PointerValue>(Env.getValue(*BarDecl, SkipPast::None));
+                const auto *BazVal =
+                    cast<PointerValue>(Env.getValue(*BazDecl, SkipPast::None));
+
+                EXPECT_EQ(FooXVal, FooYVal);
+                EXPECT_NE(FooXVal, BarVal);
+                EXPECT_NE(FooXVal, BazVal);
+                EXPECT_NE(BarVal, BazVal);
+
+                const StorageLocation &FooPointeeLoc = FooXVal->getPointeeLoc();
+                EXPECT_TRUE(isa<ScalarStorageLocation>(FooPointeeLoc));
+                EXPECT_THAT(Env.getValue(FooPointeeLoc), IsNull());
+
+                const StorageLocation &BarPointeeLoc = BarVal->getPointeeLoc();
+                EXPECT_TRUE(isa<ScalarStorageLocation>(BarPointeeLoc));
+                EXPECT_THAT(Env.getValue(BarPointeeLoc), IsNull());
+
+                const StorageLocation &BazPointeeLoc = BazVal->getPointeeLoc();
+                EXPECT_TRUE(isa<AggregateStorageLocation>(BazPointeeLoc));
+                EXPECT_THAT(Env.getValue(BazPointeeLoc), IsNull());
+              });
+}
+
+TEST_F(TransferTest, NullToMemberPointerCast) {
+  std::string Code = R"(
+    struct Foo {};
+    void target(Foo *Foo) {
+      int Foo::*MemberPointer = nullptr;
+      // [[p]]
+    }
+  )";
+  runDataflow(
+      Code, [](llvm::ArrayRef<
+                   std::pair<std::string, DataflowAnalysisState<NoopLattice>>>
+                   Results,
+               ASTContext &ASTCtx) {
+        ASSERT_THAT(Results, ElementsAre(Pair("p", _)));
+        const Environment &Env = Results[0].second.Env;
+
+        const ValueDecl *MemberPointerDecl =
+            findValueDecl(ASTCtx, "MemberPointer");
+        ASSERT_THAT(MemberPointerDecl, NotNull());
+
+        const auto *MemberPointerVal = cast<PointerValue>(
+            Env.getValue(*MemberPointerDecl, SkipPast::None));
+
+        const StorageLocation &MemberLoc = MemberPointerVal->getPointeeLoc();
+        EXPECT_THAT(Env.getValue(MemberLoc), IsNull());
+      });
+}
+
 TEST_F(TransferTest, AddrOfValue) {
   std::string Code = R"(
     void target() {
diff --git a/clang/unittests/Basic/DarwinSDKInfoTest.cpp b/clang/unittests/Basic/DarwinSDKInfoTest.cpp
index aa1feeb293c0ed665746eb7f73870f395c9d0537..8d720c2e0a6f0622e3af51406e59c016f11c0ea6 100644
--- a/clang/unittests/Basic/DarwinSDKInfoTest.cpp
+++ b/clang/unittests/Basic/DarwinSDKInfoTest.cpp
@@ -19,7 +19,7 @@ TEST(DarwinSDKInfo, VersionMapping) {
   Optional<DarwinSDKInfo::RelatedTargetVersionMapping> Mapping =
       DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(Obj,
                                                             VersionTuple());
-  EXPECT_TRUE(Mapping.hasValue());
+  EXPECT_TRUE(Mapping);
   EXPECT_EQ(Mapping->getMinimumValue(), VersionTuple(1));

   // Exact mapping.
@@ -54,7 +54,7 @@ TEST(DarwinSDKInfo, VersionMappingMissingKey) {
   Optional<DarwinSDKInfo::RelatedTargetVersionMapping> Mapping =
       DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(Obj,
                                                             VersionTuple());
-  EXPECT_TRUE(Mapping.hasValue());
+  EXPECT_TRUE(Mapping);
   EXPECT_EQ(
       Mapping->map(VersionTuple(4), VersionTuple(0, 1), VersionTuple(100)),
       None);
diff --git a/clang/unittests/Basic/DiagnosticTest.cpp b/clang/unittests/Basic/DiagnosticTest.cpp
index 8d018a8b9cef439cb771a78644c2e3e98f03aded..0ac931650e524c3b8a14a7b0690056f45ac068e6 100644
--- a/clang/unittests/Basic/DiagnosticTest.cpp
+++ b/clang/unittests/Basic/DiagnosticTest.cpp
@@ -14,6 +14,15 @@
 using namespace llvm;
 using namespace clang;

+void clang::DiagnosticsTestHelper(DiagnosticsEngine &diag) {
+  unsigned delayedDiagID = 0U;
+
+  EXPECT_EQ(diag.DelayedDiagID, delayedDiagID);
+  EXPECT_FALSE(diag.DiagStates.empty());
+  EXPECT_TRUE(diag.DiagStatesByLoc.empty());
+  EXPECT_TRUE(diag.DiagStateOnPushStack.empty());
+}
+
 namespace {

 // Check that DiagnosticErrorTrap works with SuppressAllDiagnostics.
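`DiagnosticsTestHelper` above reads private members of `DiagnosticsEngine` (`DelayedDiagID`, `DiagStates`, and so on), which only compiles if the class declares the helper as a friend; that declaration lives in the header, outside this patch. A self-contained sketch of the pattern, with invented names:

```cpp
#include <cassert>

class Engine {
public:
  void reset(bool Soft) {
    if (!Soft)
      DelayedID = 0; // a hard reset clears everything
  }

private:
  unsigned DelayedID = 0;                  // private state a test wants to see
  friend void EngineTestHelper(Engine &E); // grants the test access
};

// Defined next to the tests: the friend declaration lets it inspect private
// state without widening the class's public interface.
void EngineTestHelper(Engine &E) { assert(E.DelayedID == 0); }

int main() {
  Engine E;
  E.reset(/*Soft=*/true);
  EngineTestHelper(E);
  return 0;
}
```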
@@ -71,6 +80,32 @@ TEST(DiagnosticTest, fatalsAsError) {
     EXPECT_EQ(Diags.getNumWarnings(), FatalsAsError);
   }
 }
+
+// Check that soft reset works as intended.
+TEST(DiagnosticTest, softReset) {
+  DiagnosticsEngine Diags(new DiagnosticIDs(), new DiagnosticOptions,
+                          new IgnoringDiagConsumer());
+
+  unsigned numWarnings = 0U, numErrors = 0U;
+
+  Diags.Reset(true);
+  // Check for ErrorOccurred and TrapNumErrorsOccurred.
+  EXPECT_FALSE(Diags.hasErrorOccurred());
+  EXPECT_FALSE(Diags.hasFatalErrorOccurred());
+  EXPECT_FALSE(Diags.hasUncompilableErrorOccurred());
+  // Check for UnrecoverableErrorOccurred and TrapNumUnrecoverableErrorsOccurred.
+  EXPECT_FALSE(Diags.hasUnrecoverableErrorOccurred());
+
+  EXPECT_EQ(Diags.getNumWarnings(), numWarnings);
+  EXPECT_EQ(Diags.getNumErrors(), numErrors);
+
+  // Check the private variables of DiagnosticsEngine that distinguish a soft
+  // reset.
+  DiagnosticsTestHelper(Diags);
+
+  EXPECT_FALSE(Diags.isDiagnosticInFlight());
+  EXPECT_TRUE(Diags.isLastDiagnosticIgnored());
+}
+
 TEST(DiagnosticTest, diagnosticError) {
   DiagnosticsEngine Diags(new DiagnosticIDs(), new DiagnosticOptions,
                           new IgnoringDiagConsumer());
diff --git a/clang/unittests/Basic/SourceManagerTest.cpp b/clang/unittests/Basic/SourceManagerTest.cpp
index dadfcc94ac0d39916015a4cf85a7d05f3daebb0e..e98100faedbe26433e9d6a15a7dca39df55fd7a6 100644
--- a/clang/unittests/Basic/SourceManagerTest.cpp
+++ b/clang/unittests/Basic/SourceManagerTest.cpp
@@ -51,6 +51,73 @@ protected:
   IntrusiveRefCntPtr<TargetInfo> Target;
 };

+TEST_F(SourceManagerTest, isInMemoryBuffersNoSourceLocationInfo) {
+  // Check each method with an invalid source location.
+  SourceLocation LocEmpty;
+  bool isWrittenInBuiltInFileFalse = SourceMgr.isWrittenInBuiltinFile(LocEmpty);
+  bool isWrittenInCommandLineFileFalse =
+      SourceMgr.isWrittenInCommandLineFile(LocEmpty);
+  bool isWrittenInScratchSpaceFalse =
+      SourceMgr.isWrittenInScratchSpace(LocEmpty);
+
+  EXPECT_FALSE(isWrittenInBuiltInFileFalse);
+  EXPECT_FALSE(isWrittenInCommandLineFileFalse);
+  EXPECT_FALSE(isWrittenInScratchSpaceFalse);
+
+  // Check each method with a valid source location for its filename.
+  const char *Source = "int x";
+
+  std::unique_ptr<llvm::MemoryBuffer> BuiltInBuf =
+      llvm::MemoryBuffer::getMemBuffer(Source);
+  const FileEntry *BuiltInFile =
+      FileMgr.getVirtualFile("<built-in>", BuiltInBuf->getBufferSize(), 0);
+  SourceMgr.overrideFileContents(BuiltInFile, std::move(BuiltInBuf));
+  FileID BuiltInFileID =
+      SourceMgr.getOrCreateFileID(BuiltInFile, SrcMgr::C_User);
+  SourceMgr.setMainFileID(BuiltInFileID);
+  SourceLocation LocBuiltIn =
+      SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+  bool isWrittenInBuiltInFileTrue =
+      SourceMgr.isWrittenInBuiltinFile(LocBuiltIn);
+
+  std::unique_ptr<llvm::MemoryBuffer> CommandLineBuf =
+      llvm::MemoryBuffer::getMemBuffer(Source);
+  const FileEntry *CommandLineFile = FileMgr.getVirtualFile(
+      "<command line>", CommandLineBuf->getBufferSize(), 0);
+  SourceMgr.overrideFileContents(CommandLineFile, std::move(CommandLineBuf));
+  FileID CommandLineFileID =
+      SourceMgr.getOrCreateFileID(CommandLineFile, SrcMgr::C_User);
+  SourceMgr.setMainFileID(CommandLineFileID);
+  SourceLocation LocCommandLine =
+      SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+  bool isWrittenInCommandLineFileTrue =
+      SourceMgr.isWrittenInCommandLineFile(LocCommandLine);
+
+  std::unique_ptr<llvm::MemoryBuffer> ScratchSpaceBuf =
+      llvm::MemoryBuffer::getMemBuffer(Source);
+  const FileEntry *ScratchSpaceFile = FileMgr.getVirtualFile(
+      "<scratch space>", ScratchSpaceBuf->getBufferSize(), 0);
+  SourceMgr.overrideFileContents(ScratchSpaceFile,
std::move(ScratchSpaceBuf)); + FileID ScratchSpaceFileID = + SourceMgr.getOrCreateFileID(ScratchSpaceFile, SrcMgr::C_User); + SourceMgr.setMainFileID(ScratchSpaceFileID); + SourceLocation LocScratchSpace = + SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); + bool isWrittenInScratchSpaceTrue = + SourceMgr.isWrittenInScratchSpace(LocScratchSpace); + + EXPECT_TRUE(isWrittenInBuiltInFileTrue); + EXPECT_TRUE(isWrittenInCommandLineFileTrue); + EXPECT_TRUE(isWrittenInScratchSpaceTrue); +} + +TEST_F(SourceManagerTest, isInSystemHeader) { + // Check for invalid source location + SourceLocation LocEmpty; + bool isInSystemHeaderFalse = SourceMgr.isInSystemHeader(LocEmpty); + ASSERT_FALSE(isInSystemHeaderFalse); +} + TEST_F(SourceManagerTest, isBeforeInTranslationUnit) { const char *source = "#define M(x) [x]\n" diff --git a/clang/unittests/DirectoryWatcher/DirectoryWatcherTest.cpp b/clang/unittests/DirectoryWatcher/DirectoryWatcherTest.cpp index f0dc55a47bff3e8b55eb3ba5329fe1afbf58dea8..73bfbe54ace639c7f373894ac86944dbe02c7076 100644 --- a/clang/unittests/DirectoryWatcher/DirectoryWatcherTest.cpp +++ b/clang/unittests/DirectoryWatcher/DirectoryWatcherTest.cpp @@ -194,7 +194,7 @@ struct VerifyingConsumer { if (result()) return *result(); - ResultIsReady.wait(L, [this]() { return result().hasValue(); }); + ResultIsReady.wait(L, [this]() { return result().has_value(); }); } return false; // Just to make compiler happy. } @@ -259,11 +259,11 @@ void checkEventualResultWithTimeout(VerifyingConsumer &TestConsumer) { << "The expected result state wasn't reached before the time-out."; std::unique_lock L(TestConsumer.Mtx); EXPECT_TRUE(TestConsumer.result().hasValue()); - if (TestConsumer.result().hasValue()) { + if (TestConsumer.result()) { EXPECT_TRUE(*TestConsumer.result()); } - if ((TestConsumer.result().hasValue() && !TestConsumer.result().getValue()) || - !TestConsumer.result().hasValue()) + if ((TestConsumer.result() && !TestConsumer.result().getValue()) || + !TestConsumer.result()) TestConsumer.printUnmetExpectations(llvm::outs()); } } // namespace diff --git a/clang/unittests/Format/CMakeLists.txt b/clang/unittests/Format/CMakeLists.txt index a4ece033d60732a6fd10ba05c7c7bb62b858dfb1..9cc6c7a96af59788bf7bac2a6941ef73c82d900d 100644 --- a/clang/unittests/Format/CMakeLists.txt +++ b/clang/unittests/Format/CMakeLists.txt @@ -17,6 +17,7 @@ add_clang_unittest(FormatTests FormatTestSelective.cpp FormatTestTableGen.cpp FormatTestTextProto.cpp + FormatTestVerilog.cpp MacroExpanderTest.cpp NamespaceEndCommentsFixerTest.cpp QualifierFixerTest.cpp diff --git a/clang/unittests/Format/FormatTestUtils.h b/clang/unittests/Format/FormatTestUtils.h index ace5a4519d20b4918ade7909a7bfb45cb11c94f2..defe0738c28ced43b244be02b364a9fa0483ba9c 100644 --- a/clang/unittests/Format/FormatTestUtils.h +++ b/clang/unittests/Format/FormatTestUtils.h @@ -19,7 +19,10 @@ namespace clang { namespace format { namespace test { -inline std::string messUp(llvm::StringRef Code) { +// When HandleHash is false, preprocessor directives starting with hash will not +// be on separate lines. This is needed because Verilog uses hash for other +// purposes. 
+inline std::string messUp(llvm::StringRef Code, bool HandleHash = true) { std::string MessedUp(Code.str()); bool InComment = false; bool InPreprocessorDirective = false; @@ -29,7 +32,7 @@ inline std::string messUp(llvm::StringRef Code) { if (JustReplacedNewline) MessedUp[i - 1] = '\n'; InComment = true; - } else if (MessedUp[i] == '#' && + } else if (HandleHash && MessedUp[i] == '#' && (JustReplacedNewline || i == 0 || MessedUp[i - 1] == '\n')) { if (i != 0) MessedUp[i - 1] = '\n'; diff --git a/clang/unittests/Format/FormatTestVerilog.cpp b/clang/unittests/Format/FormatTestVerilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3c48dfad4d1a54889015de682b865908319f09bb --- /dev/null +++ b/clang/unittests/Format/FormatTestVerilog.cpp @@ -0,0 +1,251 @@ +//===- unittest/Format/FormatTestVerilog.cpp ------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "FormatTestUtils.h" +#include "clang/Format/Format.h" +#include "llvm/Support/Debug.h" +#include "gtest/gtest.h" + +#define DEBUG_TYPE "format-test" + +namespace clang { +namespace format { + +class FormatTestVerilog : public ::testing::Test { +protected: + static std::string format(llvm::StringRef Code, unsigned Offset, + unsigned Length, const FormatStyle &Style) { + LLVM_DEBUG(llvm::errs() << "---\n"); + LLVM_DEBUG(llvm::errs() << Code << "\n\n"); + std::vector Ranges(1, tooling::Range(Offset, Length)); + tooling::Replacements Replaces = reformat(Style, Code, Ranges); + auto Result = applyAllReplacements(Code, Replaces); + EXPECT_TRUE(static_cast(Result)); + LLVM_DEBUG(llvm::errs() << "\n" << *Result << "\n\n"); + return *Result; + } + + static std::string + format(llvm::StringRef Code, + const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Verilog)) { + return format(Code, 0, Code.size(), Style); + } + + static void verifyFormat( + llvm::StringRef Code, + const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Verilog)) { + EXPECT_EQ(Code.str(), format(Code, Style)) << "Expected code is not stable"; + EXPECT_EQ(Code.str(), + format(test::messUp(Code, /*HandleHash=*/false), Style)); + } +}; + +TEST_F(FormatTestVerilog, Delay) { + // Delay by the default unit. + verifyFormat("#0;"); + verifyFormat("#1;"); + verifyFormat("#10;"); + verifyFormat("#1.5;"); + // Explicit unit. + verifyFormat("#1fs;"); + verifyFormat("#1.5fs;"); + verifyFormat("#1ns;"); + verifyFormat("#1.5ns;"); + verifyFormat("#1us;"); + verifyFormat("#1.5us;"); + verifyFormat("#1ms;"); + verifyFormat("#1.5ms;"); + verifyFormat("#1s;"); + verifyFormat("#1.5s;"); + // The following expression should be on the same line. + verifyFormat("#1 x = x;"); + EXPECT_EQ("#1 x = x;", format("#1\n" + "x = x;")); +} + +TEST_F(FormatTestVerilog, If) { + verifyFormat("if (x)\n" + " x = x;"); + verifyFormat("if (x)\n" + " x = x;\n" + "x = x;"); + + // Test else + verifyFormat("if (x)\n" + " x = x;\n" + "else if (x)\n" + " x = x;\n" + "else\n" + " x = x;"); + verifyFormat("if (x) begin\n" + " x = x;\n" + "end else if (x) begin\n" + " x = x;\n" + "end else begin\n" + " x = x;\n" + "end"); + verifyFormat("if (x) begin : x\n" + " x = x;\n" + "end : x else if (x) begin : x\n" + " x = x;\n" + "end : x else begin : x\n" + " x = x;\n" + "end : x"); + + // Test block keywords. 
+ verifyFormat("if (x) begin\n" + " x = x;\n" + "end"); + verifyFormat("if (x) begin : x\n" + " x = x;\n" + "end : x"); + verifyFormat("if (x) begin\n" + " x = x;\n" + " x = x;\n" + "end"); + verifyFormat("disable fork;\n" + "x = x;"); + verifyFormat("rand join x x;\n" + "x = x;"); + verifyFormat("if (x) fork\n" + " x = x;\n" + "join"); + verifyFormat("if (x) fork\n" + " x = x;\n" + "join_any"); + verifyFormat("if (x) fork\n" + " x = x;\n" + "join_none"); + verifyFormat("if (x) generate\n" + " x = x;\n" + "endgenerate"); + verifyFormat("if (x) generate : x\n" + " x = x;\n" + "endgenerate : x"); + + // Test that concatenation braces don't get regarded as blocks. + verifyFormat("if (x)\n" + " {x} = x;"); + verifyFormat("if (x)\n" + " x = {x};"); + verifyFormat("if (x)\n" + " x = {x};\n" + "else\n" + " {x} = {x};"); +} + +TEST_F(FormatTestVerilog, Preprocessor) { + auto Style = getLLVMStyle(FormatStyle::LK_Verilog); + Style.ColumnLimit = 20; + + // Macro definitions. + EXPECT_EQ("`define X \\\n" + " if (x) \\\n" + " x = x;", + format("`define X if(x)x=x;", Style)); + EXPECT_EQ("`define X(x) \\\n" + " if (x) \\\n" + " x = x;", + format("`define X(x) if(x)x=x;", Style)); + EXPECT_EQ("`define X \\\n" + " x = x; \\\n" + " x = x;", + format("`define X x=x;x=x;", Style)); + // Macro definitions with invocations inside. + EXPECT_EQ("`define LIST \\\n" + " `ENTRY \\\n" + " `ENTRY", + format("`define LIST \\\n" + "`ENTRY \\\n" + "`ENTRY", + Style)); + EXPECT_EQ("`define LIST \\\n" + " `x = `x; \\\n" + " `x = `x;", + format("`define LIST \\\n" + "`x = `x; \\\n" + "`x = `x;", + Style)); + EXPECT_EQ("`define LIST \\\n" + " `x = `x; \\\n" + " `x = `x;", + format("`define LIST `x=`x;`x=`x;", Style)); + // Macro invocations. + verifyFormat("`x = (`x1 + `x2 + x);"); + // Lines starting with a preprocessor directive should not be indented. + std::string Directives[] = { + "begin_keywords", + "celldefine", + "default_nettype", + "define", + "else", + "elsif", + "end_keywords", + "endcelldefine", + "endif", + "ifdef", + "ifndef", + "include", + "line", + "nounconnected_drive", + "pragma", + "resetall", + "timescale", + "unconnected_drive", + "undef", + "undefineall", + }; + for (auto &Name : Directives) { + EXPECT_EQ("if (x)\n" + "`" + + Name + + "\n" + " ;", + format("if (x)\n" + "`" + + Name + + "\n" + ";", + Style)); + } + // Lines starting with a regular macro invocation should be indented as a + // normal line. + EXPECT_EQ("if (x)\n" + " `x = `x;\n" + "`timescale 1ns / 1ps", + format("if (x)\n" + "`x = `x;\n" + "`timescale 1ns / 1ps", + Style)); + EXPECT_EQ("if (x)\n" + "`timescale 1ns / 1ps\n" + " `x = `x;", + format("if (x)\n" + "`timescale 1ns / 1ps\n" + "`x = `x;", + Style)); + std::string NonDirectives[] = { + // For `__FILE__` and `__LINE__`, although the standard classifies them as + // preprocessor directives, they are used like regular macros. 
+ "__FILE__", "__LINE__", "elif", "foo", "x", + }; + for (auto &Name : NonDirectives) { + EXPECT_EQ("if (x)\n" + " `" + + Name + ";", + format("if (x)\n" + "`" + + Name + + "\n" + ";", + Style)); + } +} + +} // namespace format +} // end namespace clang diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index aa91f389a5d2e23a5cf5aceff328df4d5f24b85f..2dbc5da07d4db0267a55e374e8b8a3e39651bec3 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -402,6 +402,33 @@ TEST_F(TokenAnnotatorTest, UnderstandsRequiresClausesAndConcepts) { EXPECT_TOKEN(Tokens[25], tok::less, TT_TemplateOpener); EXPECT_TOKEN(Tokens[27], tok::greater, TT_TemplateCloser); EXPECT_TOKEN(Tokens[28], tok::greater, TT_TemplateCloser); + + Tokens = annotate("auto bar() -> int requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 16u) << Tokens; + EXPECT_TOKEN(Tokens[6], tok::kw_requires, TT_RequiresClause); + + Tokens = annotate("auto bar() -> void requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 16u) << Tokens; + EXPECT_TOKEN(Tokens[6], tok::kw_requires, TT_RequiresClause); + + Tokens = annotate("auto bar() -> MyType requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 16u) << Tokens; + EXPECT_TOKEN(Tokens[6], tok::kw_requires, TT_RequiresClause); + + Tokens = + annotate("auto bar() -> SOME_MACRO_TYPE requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 16u) << Tokens; + EXPECT_TOKEN(Tokens[6], tok::kw_requires, TT_RequiresClause); + + Tokens = + annotate("auto bar() -> qualified::type requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 18u) << Tokens; + EXPECT_TOKEN(Tokens[8], tok::kw_requires, TT_RequiresClause); + + Tokens = + annotate("auto bar() -> Template requires(is_integral_v) {}"); + ASSERT_EQ(Tokens.size(), 19u) << Tokens; + EXPECT_TOKEN(Tokens[9], tok::kw_requires, TT_RequiresClause); } TEST_F(TokenAnnotatorTest, UnderstandsRequiresExpressions) { diff --git a/clang/unittests/Interpreter/InterpreterTest.cpp b/clang/unittests/Interpreter/InterpreterTest.cpp index 280c6d7fdae2b6c27c1c03bb925f8bd83b3ab289..720e30fafca7e00e8ff3d64ba61485be7feecd7c 100644 --- a/clang/unittests/Interpreter/InterpreterTest.cpp +++ b/clang/unittests/Interpreter/InterpreterTest.cpp @@ -128,6 +128,51 @@ TEST(InterpreterTest, DeclsAndStatements) { EXPECT_EQ("Parsing failed.", llvm::toString(std::move(Err))); } +TEST(InterpreterTest, UndoCommand) { + Args ExtraArgs = {"-Xclang", "-diagnostic-log-file", "-Xclang", "-"}; + + // Create the diagnostic engine with unowned consumer. + std::string DiagnosticOutput; + llvm::raw_string_ostream DiagnosticsOS(DiagnosticOutput); + auto DiagPrinter = std::make_unique( + DiagnosticsOS, new DiagnosticOptions()); + + auto Interp = createInterpreter(ExtraArgs, DiagPrinter.get()); + + // Fail to undo. + auto Err1 = Interp->Undo(); + EXPECT_EQ("Operation failed. Too many undos", + llvm::toString(std::move(Err1))); + auto Err2 = Interp->Parse("int foo = 42;"); + EXPECT_TRUE(!!Err2); + auto Err3 = Interp->Undo(2); + EXPECT_EQ("Operation failed. Too many undos", + llvm::toString(std::move(Err3))); + + // Succeed to undo. 
+ auto Err4 = Interp->Parse("int x = 42;"); + EXPECT_TRUE(!!Err4); + auto Err5 = Interp->Undo(); + EXPECT_FALSE(Err5); + auto Err6 = Interp->Parse("int x = 24;"); + EXPECT_TRUE(!!Err6); + auto Err7 = Interp->Parse("#define X 42"); + EXPECT_TRUE(!!Err7); + auto Err8 = Interp->Undo(); + EXPECT_FALSE(Err8); + auto Err9 = Interp->Parse("#define X 24"); + EXPECT_TRUE(!!Err9); + + // Undo input contains errors. + auto Err10 = Interp->Parse("int y = ;"); + EXPECT_FALSE(!!Err10); + EXPECT_EQ("Parsing failed.", llvm::toString(Err10.takeError())); + auto Err11 = Interp->Parse("int y = 42;"); + EXPECT_TRUE(!!Err11); + auto Err12 = Interp->Undo(); + EXPECT_FALSE(Err12); +} + static std::string MangleName(NamedDecl *ND) { ASTContext &C = ND->getASTContext(); std::unique_ptr MangleC(C.createMangleContext()); diff --git a/clang/unittests/Lex/LexerTest.cpp b/clang/unittests/Lex/LexerTest.cpp index f534de1ca813a08d17f990eb65ebdce5249bdef8..0ad644ce714652520582808f6e03102fcd8294a3 100644 --- a/clang/unittests/Lex/LexerTest.cpp +++ b/clang/unittests/Lex/LexerTest.cpp @@ -612,7 +612,7 @@ TEST_F(LexerTest, FindNextToken) { SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); while (true) { auto T = Lexer::findNextToken(Loc, SourceMgr, LangOpts); - ASSERT_TRUE(T.hasValue()); + ASSERT_TRUE(T); if (T->is(tok::eof)) break; GeneratedByNextToken.push_back(getSourceText(*T, *T)); diff --git a/clang/unittests/Tooling/RefactoringTest.cpp b/clang/unittests/Tooling/RefactoringTest.cpp index c71a7243396a13cc37369d255109cc79ae32cf6d..f0edff6052c9a512fabc614cd682aba508023534 100644 --- a/clang/unittests/Tooling/RefactoringTest.cpp +++ b/clang/unittests/Tooling/RefactoringTest.cpp @@ -118,18 +118,18 @@ static bool checkReplacementError(llvm::Error &&Error, OS << "Unexpected error code: " << int(RE.get()) << "\n"; if (ExpectedExisting != RE.getExistingReplacement()) { OS << "Expected Existing != Actual Existing.\n"; - if (ExpectedExisting.hasValue()) + if (ExpectedExisting) OS << "Expected existing replacement: " << ExpectedExisting->toString() << "\n"; - if (RE.getExistingReplacement().hasValue()) + if (RE.getExistingReplacement()) OS << "Actual existing replacement: " << RE.getExistingReplacement()->toString() << "\n"; } if (ExpectedNew != RE.getNewReplacement()) { OS << "Expected New != Actual New.\n"; - if (ExpectedNew.hasValue()) + if (ExpectedNew) OS << "Expected new replacement: " << ExpectedNew->toString() << "\n"; - if (RE.getNewReplacement().hasValue()) + if (RE.getNewReplacement()) OS << "Actual new replacement: " << RE.getNewReplacement()->toString() << "\n"; } diff --git a/clang/unittests/Tooling/SourceCodeTest.cpp b/clang/unittests/Tooling/SourceCodeTest.cpp index badc6f88fc0af6a60614058d2ae5b663a0ec5265..2d4757c8c770d8febee33b822e24e42e9fe12803 100644 --- a/clang/unittests/Tooling/SourceCodeTest.cpp +++ b/clang/unittests/Tooling/SourceCodeTest.cpp @@ -474,7 +474,7 @@ int c = BAR 3.0; IntLitVisitor Visitor; Visitor.OnIntLit = [](IntegerLiteral *Expr, ASTContext *Context) { auto Range = CharSourceRange::getTokenRange(Expr->getSourceRange()); - EXPECT_FALSE(getRangeForEdit(Range, *Context).hasValue()); + EXPECT_FALSE(getRangeForEdit(Range, *Context)); }; Visitor.runOver(Code); } diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index 7987d8954eb174b9f5b350dbdd1171eeb3cfe0ec..068e6a0c072c217361beb0ae3a10eeacd6ed2a50 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -217,7 +217,7 @@ void 
RVVEmitter::createHeader(raw_ostream &OS) { for (int Log2LMUL : Log2LMULs) { auto T = RVVType::computeType(BasicType::Int8, Log2LMUL, PrototypeDescriptor::Mask); - if (T.hasValue()) + if (T) printType(T.getValue()); } // Print RVV int/float types. @@ -225,7 +225,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) { BasicType BT = ParseBasicType(I); for (int Log2LMUL : Log2LMULs) { auto T = RVVType::computeType(BT, Log2LMUL, PrototypeDescriptor::Vector); - if (T.hasValue()) { + if (T) { printType(T.getValue()); auto UT = RVVType::computeType( BT, Log2LMUL, @@ -240,7 +240,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) { for (int Log2LMUL : Log2LMULs) { auto T = RVVType::computeType(BasicType::Float16, Log2LMUL, PrototypeDescriptor::Vector); - if (T.hasValue()) + if (T) printType(T.getValue()); } OS << "#endif\n"; @@ -249,7 +249,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) { for (int Log2LMUL : Log2LMULs) { auto T = RVVType::computeType(BasicType::Float32, Log2LMUL, PrototypeDescriptor::Vector); - if (T.hasValue()) + if (T) printType(T.getValue()); } OS << "#endif\n"; @@ -258,7 +258,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) { for (int Log2LMUL : Log2LMULs) { auto T = RVVType::computeType(BasicType::Float64, Log2LMUL, PrototypeDescriptor::Vector); - if (T.hasValue()) + if (T) printType(T.getValue()); } OS << "#endif\n\n"; diff --git a/compiler-rt/cmake/Modules/AddCompilerRT.cmake b/compiler-rt/cmake/Modules/AddCompilerRT.cmake index 502200654f0427c25512cb7f5e1a028feb98003e..b7eb04327bb1da88728edb1e4706a577a8905c03 100644 --- a/compiler-rt/cmake/Modules/AddCompilerRT.cmake +++ b/compiler-rt/cmake/Modules/AddCompilerRT.cmake @@ -83,8 +83,7 @@ function(add_compiler_rt_object_libraries name) "${libname}" MATCHES ".*\.osx.*") foreach(arch ${LIB_ARCHS_${libname}}) list(APPEND target_flags - -target ${arch}-apple-macos${DARWIN_osx_MIN_VER} - -darwin-target-variant ${arch}-apple-ios13.1-macabi) + "SHELL:-target ${arch}-apple-macos${DARWIN_osx_MIN_VER} -darwin-target-variant ${arch}-apple-ios13.1-macabi") endforeach() endif() @@ -251,11 +250,9 @@ function(add_compiler_rt_runtime name type) "${os}" MATCHES "^(osx)$") foreach(arch ${LIB_ARCHS_${libname}}) list(APPEND extra_cflags_${libname} - -target ${arch}-apple-macos${DARWIN_osx_MIN_VER} - -darwin-target-variant ${arch}-apple-ios13.1-macabi) + "SHELL:-target ${arch}-apple-macos${DARWIN_osx_MIN_VER} -darwin-target-variant ${arch}-apple-ios13.1-macabi") list(APPEND extra_link_flags_${libname} - -target ${arch}-apple-macos${DARWIN_osx_MIN_VER} - -darwin-target-variant ${arch}-apple-ios13.1-macabi) + "SHELL:-target ${arch}-apple-macos${DARWIN_osx_MIN_VER} -darwin-target-variant ${arch}-apple-ios13.1-macabi") endforeach() endif() endforeach() diff --git a/compiler-rt/cmake/Modules/CompilerRTAIXUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTAIXUtils.cmake index 0e131d0ba74c07ee56ef3cce5a686c0c6171e1c7..c7dc2b882d056780fd659813ef2dc6c31659b754 100644 --- a/compiler-rt/cmake/Modules/CompilerRTAIXUtils.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTAIXUtils.cmake @@ -2,16 +2,16 @@ include(CMakeParseArguments) include(CompilerRTUtils) function(get_aix_libatomic_default_link_flags link_flags export_list) - set(linkopts - "-Wl,-H512 -Wl,-D0 \ - -Wl,-T512 -Wl,-bhalt:4 -Wl,-bernotok \ - -Wl,-bnoentry -Wl,-bexport:${export_list} \ - -Wl,-bmodtype:SRE -Wl,-lc") +set(linkopts + -Wl,-H512 -Wl,-D0 + -Wl,-T512 -Wl,-bhalt:4 -Wl,-bernotok + -Wl,-bnoentry -Wl,-bexport:${export_list} + -Wl,-bmodtype:SRE -Wl,-lc) # Add `-Wl,-G`. 
Quoted from release notes of cmake-3.16.0 # > On AIX, runtime linking is no longer enabled by default. # See https://cmake.org/cmake/help/latest/release/3.16.html if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.16.0") - set(linkopts "-Wl,-G" "${linkopts}") + set(linkopts -Wl,-G ${linkopts}) endif() set(${link_flags} ${linkopts} PARENT_SCOPE) endfunction() diff --git a/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake index 4782e727e1c875455abba539f5644f6e7d36becd..2c9983c6a1ae36864f023c570322b95d9684169f 100644 --- a/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake @@ -304,8 +304,7 @@ macro(darwin_add_builtin_library name suffix) "${LIB_OS}" MATCHES "^osx$") # Build the macOS builtins with Mac Catalyst support. list(APPEND builtin_cflags - -target ${LIB_ARCH}-apple-macos${DARWIN_osx_BUILTIN_MIN_VER} - -darwin-target-variant ${LIB_ARCH}-apple-ios13.1-macabi) + "SHELL:-target ${LIB_ARCH}-apple-macos${DARWIN_osx_BUILTIN_MIN_VER} -darwin-target-variant ${LIB_ARCH}-apple-ios13.1-macabi") endif() set_target_compile_flags(${libname} @@ -400,12 +399,12 @@ endfunction() macro(darwin_add_builtin_libraries) set(DARWIN_EXCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Darwin-excludes) - set(CFLAGS "-fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer") + set(CFLAGS -fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer) set(CMAKE_C_FLAGS "") set(CMAKE_CXX_FLAGS "") set(CMAKE_ASM_FLAGS "") - append_string_if(COMPILER_RT_HAS_ASM_LSE " -DHAS_ASM_LSE" CFLAGS) + append_list_if(COMPILER_RT_HAS_ASM_LSE -DHAS_ASM_LSE CFLAGS) set(PROFILE_SOURCES ../profile/InstrProfiling.c ../profile/InstrProfilingBuffer.c @@ -515,7 +514,7 @@ macro(darwin_add_embedded_builtin_libraries) set(MACHO_SYM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/macho_embedded) - set(CFLAGS "-Oz -Wall -fomit-frame-pointer -ffreestanding") + set(CFLAGS -Oz -Wall -fomit-frame-pointer -ffreestanding) set(CMAKE_C_FLAGS "") set(CMAKE_CXX_FLAGS "") set(CMAKE_ASM_FLAGS "") @@ -534,8 +533,8 @@ macro(darwin_add_embedded_builtin_libraries) set(DARWIN_macho_embedded_LIBRARY_INSTALL_DIR ${COMPILER_RT_INSTALL_LIBRARY_DIR}/macho_embedded) - set(CFLAGS_armv7 "-target thumbv7-apple-darwin-eabi") - set(CFLAGS_i386 "-march=pentium") + set(CFLAGS_armv7 -target thumbv7-apple-darwin-eabi) + set(CFLAGS_i386 -march=pentium) darwin_read_list_from_file(common_FUNCTIONS ${MACHO_SYM_DIR}/common.txt) darwin_read_list_from_file(thumb2_FUNCTIONS ${MACHO_SYM_DIR}/thumb2.txt) diff --git a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake index 27f68ead412c81cbf1b5164a8efdc709df7c2e63..9b5e03a6607baad5133de104e05970e8ef5f2105 100644 --- a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake @@ -5,19 +5,11 @@ include(CheckSymbolExists) # define a handy helper function for it. The compile flags setting in CMake # has serious issues that make its syntax challenging at best. 
function(set_target_compile_flags target) - set(argstring "") - foreach(arg ${ARGN}) - set(argstring "${argstring} ${arg}") - endforeach() - set_property(TARGET ${target} PROPERTY COMPILE_FLAGS "${argstring}") + set_property(TARGET ${target} PROPERTY COMPILE_OPTIONS ${ARGN}) endfunction() function(set_target_link_flags target) - set(argstring "") - foreach(arg ${ARGN}) - set(argstring "${argstring} ${arg}") - endforeach() - set_property(TARGET ${target} PROPERTY LINK_FLAGS "${argstring}") + set_property(TARGET ${target} PROPERTY LINK_OPTIONS ${ARGN}) endfunction() # Set the variable var_PYBOOL to True if var holds a true-ish string, @@ -128,7 +120,9 @@ macro(test_target_arch arch def) if(NOT HAS_${arch}_DEF) set(CAN_TARGET_${arch} FALSE) elseif(TEST_COMPILE_ONLY) - try_compile_only(CAN_TARGET_${arch} FLAGS ${TARGET_${arch}_CFLAGS}) + try_compile_only(CAN_TARGET_${arch} + SOURCE "#include \nint foo(int x, int y) { return x + y; }\n" + FLAGS ${TARGET_${arch}_CFLAGS}) else() set(FLAG_NO_EXCEPTIONS "") if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG) diff --git a/compiler-rt/lib/fuzzer/CMakeLists.txt b/compiler-rt/lib/fuzzer/CMakeLists.txt index 856cd732d51750e39d92c7afbf26ad55d63fd417..4500c4f134093306a30bae7db6487f4b61c65548 100644 --- a/compiler-rt/lib/fuzzer/CMakeLists.txt +++ b/compiler-rt/lib/fuzzer/CMakeLists.txt @@ -141,7 +141,7 @@ if(OS_NAME MATCHES "Linux|Fuchsia" AND get_target_flags_for_arch(${arch} target_cflags) if(CMAKE_CXX_COMPILER_ID MATCHES Clang) get_compiler_rt_target(${arch} target) - set(target_cflags "--target=${target} ${target_cflags}") + set(target_cflags --target=${target} ${target_cflags}) endif() set(cxx_${arch}_merge_dir "${CMAKE_CURRENT_BINARY_DIR}/cxx_${arch}_merge.dir") file(MAKE_DIRECTORY ${cxx_${arch}_merge_dir}) diff --git a/compiler-rt/test/asan/TestCases/global-location-nodebug.cpp b/compiler-rt/test/asan/TestCases/global-location-nodebug.cpp index c69624a1791e3b27bdea04983bbfbfa6cb01c726..2f35e3c7871852cee4a1a7b2fb53f3787678580c 100644 --- a/compiler-rt/test/asan/TestCases/global-location-nodebug.cpp +++ b/compiler-rt/test/asan/TestCases/global-location-nodebug.cpp @@ -8,6 +8,9 @@ // RUN: not %run %t f 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=FUNC_STATIC-NO-G // RUN: not %run %t l 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=LITERAL-NO-G +/// Solaris ld -S has different semantics. 
+// XFAIL: solaris + // CHECK: AddressSanitizer: global-buffer-overflow // CLASS_STATIC-NO-G: 0x{{.*}} is located 4 bytes to the right of global variable 'C::array' defined in '{{.*}}global-location.cpp' {{.*}} of size 40 // GLOB-NO-G: 0x{{.*}} is located 4 bytes to the right of global variable 'global' defined in '{{.*}}global-location.cpp' {{.*}} of size 40 diff --git a/compiler-rt/test/hwasan/TestCases/global-with-reduction.c b/compiler-rt/test/hwasan/TestCases/global-with-reduction.c new file mode 100644 index 0000000000000000000000000000000000000000..22a4efccc5ed6603537b6ca1b858bb86b32eb3e1 --- /dev/null +++ b/compiler-rt/test/hwasan/TestCases/global-with-reduction.c @@ -0,0 +1,50 @@ +// RUN: %clang_hwasan %s -o %t +// RUN: %run %t 0 +// RUN: not %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RSYM %s +// RUN: not %env_hwasan_opts=symbolize=0 %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RNOSYM %s +// RUN: not %run %t -1 2>&1 | FileCheck --check-prefixes=CHECK,LSYM %s +// RUN: not %env_hwasan_opts=symbolize=0 %run %t -1 2>&1 | FileCheck --check-prefixes=CHECK,LNOSYM %s + +// Test with and without optimizations, with and without PIC, since different +// backend passes run depending on these flags. +// RUN: %clang_hwasan -fno-pic %s -o %t +// RUN: not %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RSYM %s +// RUN: %clang_hwasan -fno-pic -O2 %s -o %t +// RUN: not %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RSYM %s +// RUN: %clang_hwasan -O2 %s -o %t +// RUN: not %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RSYM %s + +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t && %run %t 0 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -fno-pic && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -O2 && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -fno-pic -O2 && %run %t 1 + +// REQUIRES: pointer-tagging + +#include + +// GlobalOpt may replace the current GV with a new boolean-typed GV. Previously, +// this resulted in the "nosanitize" getting dropped because while the data/code +// references to the GV were updated, the old metadata references weren't. 
+int* f() { +#ifdef USE_NOSANITIZE +__attribute__((no_sanitize("hwaddress"))) static int x = 1; +#else // USE_NOSANITIZE + static int x = 1; +#endif // USE_NOSANITIZE + if (x == 1) x = 0; + return &x; +} + +int main(int argc, char **argv) { + // CHECK: Cause: global-overflow + // RSYM: is located 0 bytes to the right of 4-byte global variable f.x {{.*}} in {{.*}}global-with-reduction.c.tmp + // RNOSYM: is located to the right of a 4-byte global variable in + // RNOSYM-NEXT: #0 0x{{.*}} ({{.*}}global-with-reduction.c.tmp+{{.*}}) + // LSYM: is located 4 bytes to the left of 4-byte global variable f.x {{.*}} in {{.*}}global-with-reduction.c.tmp + // LNOSYM: is located to the left of a 4-byte global variable in + // LNOSYM-NEXT: #0 0x{{.*}} ({{.*}}global-with-reduction.c.tmp+{{.*}}) + // CHECK-NOT: can not describe + f()[atoi(argv[1])] = 1; +} diff --git a/compiler-rt/test/hwasan/TestCases/global.c b/compiler-rt/test/hwasan/TestCases/global.c index 2da3b6dce964ad8baf1d0427171c1cae2ffa6540..4a790e2d08be2b3fb2bac908177dc3a835000223 100644 --- a/compiler-rt/test/hwasan/TestCases/global.c +++ b/compiler-rt/test/hwasan/TestCases/global.c @@ -14,9 +14,23 @@ // RUN: %clang_hwasan -O2 %s -o %t // RUN: not %run %t 1 2>&1 | FileCheck --check-prefixes=CHECK,RSYM %s +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t && %run %t 0 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -fno-pic && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -O2 && %run %t 1 +// RUN: %clang_hwasan -DUSE_NOSANITIZE %s -o %t -fno-pic -O2 && %run %t 1 + // REQUIRES: pointer-tagging +#include + +int a = 1; +#ifdef USE_NOSANITIZE +__attribute__((no_sanitize("hwaddress"))) int x = 1; +#else // USE_NOSANITIZE int x = 1; +#endif // USE_NOSANITIZE +int b = 1; int atoi(const char *); diff --git a/flang/docs/ReleaseNotes.md b/flang/docs/ReleaseNotes.md index 7696f423b8c1d7b9ae5a230b1a1c3d794094e22a..b41a2aabb9733345ccc3ce3284748eeb9a1005d1 100644 --- a/flang/docs/ReleaseNotes.md +++ b/flang/docs/ReleaseNotes.md @@ -30,6 +30,13 @@ page](https://llvm.org/releases/). * The bash wrapper script, `flang`, is renamed as `flang-to-external-fc`. ## New Compiler Flags +* Refined how `-f{no-}color-diagnostics` is treated to better align with Clang. + In particular, both `-fcolor-diagnostics` and `-fno-color-diagnostics` are + now available in `flang-new` (the diagnostics are formatted by default). In + the frontend driver, `flang-new -fc1`, only `-fcolor-diagnostics` is + available (by default, the diagnostics are not formatted). Note that this + will only affect the diagnostics printed by driver (scanning, parsing and + semantic diagnostics are not affected). 
## Windows Support
 
diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h
index c96bc364e3433fb97ab0430f0e37f2db951a67fc..b8908555f8e871348dc6626b4317037526f84ed9 100644
--- a/flang/include/flang/Evaluate/tools.h
+++ b/flang/include/flang/Evaluate/tools.h
@@ -345,7 +345,7 @@ bool IsArrayElement(const Expr<SomeType> &expr, bool intoSubstring = true,
 
 template <typename A> std::optional<NamedEntity> ExtractNamedEntity(const A &x) {
-  if (auto dataRef{ExtractDataRef(x, true)}) {
+  if (auto dataRef{ExtractDataRef(x)}) {
     return common::visit(
         common::visitors{
             [](SymbolRef &&symbol) -> std::optional<NamedEntity> {
diff --git a/flang/include/flang/Frontend/CodeGenOptions.def b/flang/include/flang/Frontend/CodeGenOptions.def
new file mode 100644
index 0000000000000000000000000000000000000000..d67f3838d446ed53a7827619fd57f4e67cdcfff2
--- /dev/null
+++ b/flang/include/flang/Frontend/CodeGenOptions.def
@@ -0,0 +1,22 @@
+//===--- CodeGenOptions.def - Code generation option database ----- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the code generation options. Users of this file
+// must define the CODEGENOPT macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef CODEGENOPT
+# error Define the CODEGENOPT macro to handle code generation options
+#endif
+
+CODEGENOPT(OptimizationLevel, 2, 0) ///< The -O[0-3] option specified.
+
+CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
+                                   ///< pass manager.
+
+#undef CODEGENOPT
diff --git a/flang/include/flang/Frontend/CodeGenOptions.h b/flang/include/flang/Frontend/CodeGenOptions.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe25bfdd06aa4ad4db91eedd93358fd4eced0182
--- /dev/null
+++ b/flang/include/flang/Frontend/CodeGenOptions.h
@@ -0,0 +1,52 @@
+//===--- CodeGenOptions.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CodeGenOptions interface, which holds the
+// configuration for LLVM's middle-end and back-end. It controls LLVM's code
+// generation into assembly or machine code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_FRONTEND_CODEGENOPTIONS_H
+#define FORTRAN_FRONTEND_CODEGENOPTIONS_H
+
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace Fortran::frontend {
+
+/// Bitfields of CodeGenOptions, split out from CodeGenOptions to ensure
+/// that this large collection of bitfields is a trivial class type.
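+///
+/// Both classes below are populated from the X-macro database above: each
+/// includer defines CODEGENOPT before including CodeGenOptions.def. For
+/// instance, the definition used just below,
+///
+///   #define CODEGENOPT(Name, Bits, Default) unsigned Name : Bits;
+///
+/// expands CODEGENOPT(OptimizationLevel, 2, 0) into the bitfield declaration
+/// `unsigned OptimizationLevel : 2;`.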
+class CodeGenOptionsBase { + +public: +#define CODEGENOPT(Name, Bits, Default) unsigned Name : Bits; +#include "flang/Frontend/CodeGenOptions.def" + +protected: +#define CODEGENOPT(Name, Bits, Default) +#include "flang/Frontend/CodeGenOptions.def" +}; + +/// Tracks various options which control how the code is optimized and passed +/// to the LLVM backend. +class CodeGenOptions : public CodeGenOptionsBase { + +public: + CodeGenOptions(); +}; + +} // end namespace Fortran::frontend + +#endif diff --git a/flang/include/flang/Frontend/CompilerInvocation.h b/flang/include/flang/Frontend/CompilerInvocation.h index 55fd5a039b4754f3a2b0785767aceae4caf2baaf..cdc7878e93c31157b317f052f776ebdaeb09459d 100644 --- a/flang/include/flang/Frontend/CompilerInvocation.h +++ b/flang/include/flang/Frontend/CompilerInvocation.h @@ -13,6 +13,7 @@ #ifndef FORTRAN_FRONTEND_COMPILERINVOCATION_H #define FORTRAN_FRONTEND_COMPILERINVOCATION_H +#include "flang/Frontend/CodeGenOptions.h" #include "flang/Frontend/FrontendOptions.h" #include "flang/Frontend/PreprocessorOptions.h" #include "flang/Frontend/TargetOptions.h" @@ -70,6 +71,9 @@ class CompilerInvocation : public CompilerInvocationBase { /// Options controlling the target. Fortran::frontend::TargetOptions targetOpts; + /// Options controlling IRgen and the backend. + Fortran::frontend::CodeGenOptions codeGenOpts; + // Semantics context std::unique_ptr semanticsContext; @@ -129,6 +133,9 @@ public: TargetOptions &getTargetOpts() { return targetOpts; } const TargetOptions &getTargetOpts() const { return targetOpts; } + CodeGenOptions &getCodeGenOpts() { return codeGenOpts; } + const CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } + Fortran::semantics::SemanticsContext &getSemanticsContext() { return *semanticsContext; } diff --git a/flang/include/flang/Frontend/FrontendActions.h b/flang/include/flang/Frontend/FrontendActions.h index 586932df92ad33ee8d05609f287848f4b4283708..975aaa0b9da2786265c1871c27ce12980568815b 100644 --- a/flang/include/flang/Frontend/FrontendActions.h +++ b/flang/include/flang/Frontend/FrontendActions.h @@ -13,6 +13,7 @@ #ifndef FORTRAN_FRONTEND_FRONTENDACTIONS_H #define FORTRAN_FRONTEND_FRONTENDACTIONS_H +#include "flang/Frontend/CodeGenOptions.h" #include "flang/Frontend/FrontendAction.h" #include "flang/Parser/parsing.h" #include "flang/Semantics/semantics.h" @@ -198,7 +199,11 @@ class CodeGenAction : public FrontendAction { void executeAction() override; /// Runs prescan, parsing, sema and lowers to MLIR. bool beginSourceFileAction() override; + /// Sets up LLVM's TargetMachine, configures llvmModule accordingly. void setUpTargetMachine(); + /// Runs the optimization (aka middle-end) pipeline on the LLVM module + /// associated with this action. + void runOptimizationPipeline(llvm::raw_pwrite_stream &os); protected: CodeGenAction(BackendActionTy act) : action{act} {}; diff --git a/flang/include/flang/Lower/AbstractConverter.h b/flang/include/flang/Lower/AbstractConverter.h index 525d4e4473639b95c64975e74563da2dcd8af70c..b00414fffd5ee0fb83ea4817275a0dcbebf582f6 100644 --- a/flang/include/flang/Lower/AbstractConverter.h +++ b/flang/include/flang/Lower/AbstractConverter.h @@ -103,11 +103,13 @@ public: virtual void copyHostAssociateVar(const Fortran::semantics::Symbol &sym) = 0; - /// Collect the set of symbols flagged as \p flag in \p eval region. + /// Collect the set of ultimate symbols of symbols with \p flag in \p eval + /// region if \p isUltimateSymbol is true. Otherwise, collect the set of + /// symbols with \p flag. 
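+  /// (The "ultimate" symbol is the one a use- or host-associated name finally
+  /// resolves to, e.g. the declaration in the module from which a symbol is
+  /// use-associated.)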
   virtual void collectSymbolSet(
       pft::Evaluation &eval,
       llvm::SetVector<const Fortran::semantics::Symbol *> &symbolSet,
-      Fortran::semantics::Symbol::Flag flag) = 0;
+      Fortran::semantics::Symbol::Flag flag, bool isUltimateSymbol = true) = 0;
 
   //===--------------------------------------------------------------------===//
   // Expressions
 
diff --git a/flang/include/flang/Lower/IntrinsicCall.h b/flang/include/flang/Lower/IntrinsicCall.h
index 2267e2c22579886214ee54a9d5c64c42ecd29aac..0846c6d38c4317e4dd60dd21fbd99cfd3d98b2ce 100644
--- a/flang/include/flang/Lower/IntrinsicCall.h
+++ b/flang/include/flang/Lower/IntrinsicCall.h
@@ -75,9 +75,9 @@ getIntrinsicArgumentLowering(llvm::StringRef intrinsicName);
 
-/// Return how argument \p argName should be lowered given the rules for the
-/// intrinsic function. The argument names are the ones defined by the standard.
-ArgLoweringRule lowerIntrinsicArgumentAs(mlir::Location,
-                                         const IntrinsicArgumentLoweringRules &,
-                                         llvm::StringRef argName);
+/// Return how the argument at \p position should be lowered given the rules
+/// for the intrinsic function. Positions follow the dummy argument order
+/// defined by the standard.
+ArgLoweringRule lowerIntrinsicArgumentAs(const IntrinsicArgumentLoweringRules &,
+                                         unsigned position);
 
 /// Return place-holder for absent intrinsic arguments.
 fir::ExtendedValue getAbsentIntrinsicArgument();
diff --git a/flang/include/flang/Lower/SymbolMap.h b/flang/include/flang/Lower/SymbolMap.h
index 98f4e3cbe486b52498da3f95ca787a831ced87d2..165776f3d7c0e770356c819cb9d17f5b9a1f2bb9 100644
--- a/flang/include/flang/Lower/SymbolMap.h
+++ b/flang/include/flang/Lower/SymbolMap.h
@@ -302,6 +302,13 @@ public:
     return shallowLookupSymbol(*sym);
   }
 
+  /// Find `symbol` and return its value if it appears in the map one level up,
+  /// e.g. for the host variable under host-association in OpenMP code.
+  SymbolBox lookupOneLevelUpSymbol(semantics::SymbolRef sym);
+  SymbolBox lookupOneLevelUpSymbol(const semantics::Symbol *sym) {
+    return lookupOneLevelUpSymbol(*sym);
+  }
+
   /// Add a new binding from the ac-do-variable `var` to `value`.
void pushImpliedDoBinding(AcDoVar var, mlir::Value value) { impliedDoStack.emplace_back(var, value); diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td index 165a3a614a5dd4589d71fe8ca10afaccc051baf7..5a9cc62bcbf00a4f87d45973dee894eed9e0fd4e 100644 --- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td +++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td @@ -580,7 +580,7 @@ def AnyBoxLike : TypeConstraint, "any box">; def AnyRefOrBoxLike : TypeConstraint, + AnyBoxLike.predicate, FunctionType.predicate]>, "any reference or box like">; def AnyRefOrBox : TypeConstraint, diff --git a/flang/include/flang/Runtime/iostat.h b/flang/include/flang/Runtime/iostat.h index bd38e748e4a9293067b28c84735d142ea2c906ae..6d6be7c7b8d57a3208bf87227f0496d9a5708c71 100644 --- a/flang/include/flang/Runtime/iostat.h +++ b/flang/include/flang/Runtime/iostat.h @@ -81,6 +81,7 @@ enum Iostat { IostatTooManyAsyncOps, IostatBadBackspaceUnit, IostatBadUnitNumber, + IostatBadFlushUnit, }; const char *IostatErrorString(int); diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc index 66e0818ef24df02986e012d5151dee9cdb5423ae..c432d7d9cb8391d6d10778107fb96f0abebf5c0d 100644 --- a/flang/include/flang/Tools/CLOptions.inc +++ b/flang/include/flang/Tools/CLOptions.inc @@ -154,10 +154,12 @@ inline void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm) { // simplify the IR mlir::GreedyRewriteConfig config; config.enableRegionSimplification = false; + pm.addPass(mlir::createCSEPass()); fir::addAVC(pm); pm.addNestedPass(fir::createCharacterConversionPass()); pm.addPass(mlir::createCanonicalizerPass(config)); pm.addPass(fir::createSimplifyRegionLitePass()); + pm.addPass(mlir::createCSEPass()); fir::addMemoryAllocationOpt(pm); // The default inliner pass adds the canonicalizer pass with the default @@ -173,6 +175,7 @@ inline void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm) { pm.addPass(mlir::createCanonicalizerPass(config)); pm.addPass(fir::createSimplifyRegionLitePass()); + pm.addPass(mlir::createCSEPass()); } #if !defined(FLANG_EXCLUDE_CODEGEN) diff --git a/flang/lib/Evaluate/common.cpp b/flang/lib/Evaluate/common.cpp index cdcd0ef5184ba146f1f4bb3ec9851729b5193742..c659a5002ba0fc6ed768ac808d097f217d20b2a2 100644 --- a/flang/lib/Evaluate/common.cpp +++ b/flang/lib/Evaluate/common.cpp @@ -22,7 +22,7 @@ void RealFlagWarnings( if (std::strcmp(operation, "division") == 0) { context.messages().Say("division by zero"_warn_en_US); } else { - context.messages().Say("division on %s"_warn_en_US, operation); + context.messages().Say("division by zero on %s"_warn_en_US, operation); } } if (flags.test(RealFlag::InvalidArgument)) { diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp index 241f008c6d89ba1675766ee14c482baa929e7c1a..bc9e389cfed9068672e9c6074b9be2947d206e19 100644 --- a/flang/lib/Evaluate/intrinsics.cpp +++ b/flang/lib/Evaluate/intrinsics.cpp @@ -1561,14 +1561,14 @@ std::optional IntrinsicInterface::Match( (std::strcmp(name, "shape") == 0 || std::strcmp(name, "size") == 0 || std::strcmp(name, "ubound") == 0)) { - // Check for an assumed-size array argument. + // Check for a whole assumed-size array argument. // These are disallowed for SHAPE, and require DIM= for // SIZE and UBOUND. // (A previous error message for UBOUND will take precedence // over this one, as this error is caught by the second entry // for UBOUND.) 
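      // For example, given REAL :: a(10, *), SHAPE(a) is rejected outright,
      // while SIZE(a) and UBOUND(a) are only accepted with an explicit DIM=,
      // as in SIZE(a, DIM=1).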
- if (const Symbol * argSym{GetLastSymbol(*arg)}) { - if (semantics::IsAssumedSizeArray(*argSym)) { + if (auto named{ExtractNamedEntity(*arg)}) { + if (semantics::IsAssumedSizeArray(named->GetLastSymbol())) { if (strcmp(name, "shape") == 0) { messages.Say(arg->sourceLocation(), "The '%s=' argument to the intrinsic function '%s' may not be assumed-size"_err_en_US, diff --git a/flang/lib/Evaluate/real.cpp b/flang/lib/Evaluate/real.cpp index 3b81d23afe41c86b4ab2ddc8450b27c0e74903df..b7230f891fa80a1d192965f2628728f4c256a873 100644 --- a/flang/lib/Evaluate/real.cpp +++ b/flang/lib/Evaluate/real.cpp @@ -274,6 +274,7 @@ ValueWithRealFlags> Real::SQRT(Rounding rounding) const { // SQRT(-0) == -0 in IEEE-754. result.value = NegativeZero(); } else { + result.flags.set(RealFlag::InvalidArgument); result.value = NotANumber(); } } else if (IsInfinite()) { @@ -297,53 +298,31 @@ ValueWithRealFlags> Real::SQRT(Rounding rounding) const { result.value.GetFraction()); return result; } - // Compute the square root of the reduced value with the slow but - // reliable bit-at-a-time method. Start with a clear significand and - // half of the unbiased exponent, and then try to set significand bits - // in descending order of magnitude without exceeding the exact result. - expo = expo / 2 + exponentBias; - result.value.Normalize(false, expo, Fraction::MASKL(1)); - Real initialSq{result.value.Multiply(result.value).value}; - if (Compare(initialSq) == Relation::Less) { - // Initial estimate is too large; this can happen for values just - // under 1.0. - --expo; - result.value.Normalize(false, expo, Fraction::MASKL(1)); - } - for (int bit{significandBits - 1}; bit >= 0; --bit) { - Word word{result.value.word_}; - result.value.word_ = word.IBSET(bit); - auto squared{result.value.Multiply(result.value, rounding)}; - if (squared.flags.test(RealFlag::Overflow) || - squared.flags.test(RealFlag::Underflow) || - Compare(squared.value) == Relation::Less) { - result.value.word_ = word; - } - } - // The computed square root has a square that's not greater than the - // original argument. Check this square against the square of the next - // larger Real and return that one if its square is closer in magnitude to - // the original argument. - Real resultSq{result.value.Multiply(result.value).value}; - Real diff{Subtract(resultSq).value.ABS()}; - if (diff.IsZero()) { - return result; // exact - } - Real ulp; - ulp.Normalize(false, expo, Fraction::MASKR(1)); - Real nextAfter{result.value.Add(ulp).value}; - auto nextAfterSq{nextAfter.Multiply(nextAfter)}; - if (!nextAfterSq.flags.test(RealFlag::Overflow) && - !nextAfterSq.flags.test(RealFlag::Underflow)) { - Real nextAfterDiff{Subtract(nextAfterSq.value).value.ABS()}; - if (nextAfterDiff.Compare(diff) == Relation::Less) { - result.value = nextAfter; - if (nextAfterDiff.IsZero()) { - return result; // exact - } + // (-1) <= expo <= 1; use it as a shift to set the desired square. + using Extended = typename value::Integer<(binaryPrecision + 2)>; + Extended goal{ + Extended::ConvertUnsigned(GetFraction()).value.SHIFTL(expo + 1)}; + // Calculate the exact square root by maximizing a value whose square + // does not exceed the goal. Use two extra bits of precision for + // rounding. 
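+  // An illustrative sketch of the same bit-at-a-time idea on plain machine
+  // integers (editor's example, assuming <stdint.h> types; not used here):
+  //
+  //   uint16_t isqrt(uint32_t goal) {
+  //     uint16_t r = 0;
+  //     for (int bit = 15; bit >= 0; --bit) {
+  //       uint32_t next = r | (1u << bit);
+  //       if (next * next <= goal)
+  //         r = (uint16_t)next;
+  //     }
+  //     return r; // largest r with r*r <= goal
+  //   }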
+ bool sticky{true}; + Extended extFrac{}; + for (int bit{Extended::bits - 1}; bit >= 0; --bit) { + Extended next{extFrac.IBSET(bit)}; + auto squared{next.MultiplyUnsigned(next)}; + auto cmp{squared.upper.CompareUnsigned(goal)}; + if (cmp == Ordering::Less) { + extFrac = next; + } else if (cmp == Ordering::Equal && squared.lower.IsZero()) { + extFrac = next; + sticky = false; + break; // exact result } } - result.flags.set(RealFlag::Inexact); + RoundingBits roundingBits{extFrac.BTEST(1), extFrac.BTEST(0), sticky}; + NormalizeAndRound(result, false, exponentBias, + Fraction::ConvertUnsigned(extFrac.SHIFTR(2)).value, rounding, + roundingBits); } return result; } diff --git a/flang/lib/Frontend/CMakeLists.txt b/flang/lib/Frontend/CMakeLists.txt index 476dff0a76cfaccce89e78500be08cee4aefe7d4..96769c707f1021ae85e8ada63be0d315ebd6a651 100644 --- a/flang/lib/Frontend/CMakeLists.txt +++ b/flang/lib/Frontend/CMakeLists.txt @@ -3,6 +3,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_flang_library(flangFrontend CompilerInstance.cpp CompilerInvocation.cpp + CodeGenOptions.cpp FrontendAction.cpp FrontendActions.cpp FrontendOptions.cpp diff --git a/flang/lib/Frontend/CodeGenOptions.cpp b/flang/lib/Frontend/CodeGenOptions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..87641d94f8c80279cefca858eac5ef8343191be7 --- /dev/null +++ b/flang/lib/Frontend/CodeGenOptions.cpp @@ -0,0 +1,23 @@ +//===--- CodeGenOptions.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ +// +//===----------------------------------------------------------------------===// + +#include "flang/Frontend/CodeGenOptions.h" +#include + +namespace Fortran::frontend { + +CodeGenOptions::CodeGenOptions() { +#define CODEGENOPT(Name, Bits, Default) Name = Default; +#include "flang/Frontend/CodeGenOptions.def" +} + +} // end namespace Fortran::frontend diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp index f665daf7a9b10e586026f168e33419d292e0a646..f6fde9f4aaf5679389b13bd9157788e2b3b464c0 100644 --- a/flang/lib/Frontend/CompilerInvocation.cpp +++ b/flang/lib/Frontend/CompilerInvocation.cpp @@ -12,6 +12,7 @@ #include "flang/Frontend/CompilerInvocation.h" #include "flang/Common/Fortran-features.h" +#include "flang/Frontend/CodeGenOptions.h" #include "flang/Frontend/PreprocessorOptions.h" #include "flang/Frontend/TargetOptions.h" #include "flang/Semantics/semantics.h" @@ -20,6 +21,7 @@ #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/DiagnosticOptions.h" #include "clang/Driver/DriverDiagnostic.h" +#include "clang/Driver/OptionUtils.h" #include "clang/Driver/Options.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" @@ -95,6 +97,18 @@ bool Fortran::frontend::parseDiagnosticArgs(clang::DiagnosticOptions &opts, return true; } +static void parseCodeGenArgs(Fortran::frontend::CodeGenOptions &opts, + llvm::opt::ArgList &args, + clang::DiagnosticsEngine &diags) { + unsigned defaultOpt = llvm::CodeGenOpt::None; + opts.OptimizationLevel = clang::getLastArgIntValue( + args, clang::driver::options::OPT_O, defaultOpt, diags); + + if 
(args.hasFlag(clang::driver::options::OPT_fdebug_pass_manager, + clang::driver::options::OPT_fno_debug_pass_manager, false)) + opts.DebugPassManager = 1; +} + /// Parses all target input arguments and populates the target /// options accordingly. /// @@ -616,6 +630,7 @@ bool CompilerInvocation::createFromArgs( success &= parseFrontendArgs(res.getFrontendOpts(), args, diags); parseTargetArgs(res.getTargetOpts(), args); parsePreprocessorArgs(res.getPreprocessorOpts(), args); + parseCodeGenArgs(res.getCodeGenOpts(), args, diags); success &= parseSemaArgs(res, args, diags); success &= parseDialectArgs(res, args, diags); success &= parseDiagArgs(res, args, diags); diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp index a842420be887a2bbc5b9e30f83be500f999990e2..ae7150900ecf05c8f632945e58d4b203e73fb17d 100644 --- a/flang/lib/Frontend/FrontendActions.cpp +++ b/flang/lib/Frontend/FrontendActions.cpp @@ -46,6 +46,7 @@ #include "llvm/IRReader/IRReader.h" #include "llvm/MC/TargetRegistry.h" #include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/StandardInstrumentations.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Target/TargetMachine.h" @@ -538,7 +539,6 @@ void CodeGenAction::setUpTargetMachine() { /*Features=*/"", llvm::TargetOptions(), llvm::None)); assert(tm && "Failed to create TargetMachine"); - llvmModule->setDataLayout(tm->createDataLayout()); } static std::unique_ptr @@ -610,23 +610,59 @@ static void generateMachineCodeOrAssemblyImpl(clang::DiagnosticsEngine &diags, codeGenPasses.run(llvmModule); } -/// Generate LLVM byte code file from the input LLVM module. -/// -/// \param [in] tm Target machine to aid the code-gen pipeline set-up -/// \param [in] llvmModule LLVM module to lower to assembly/machine-code -/// \param [out] os Output stream to emit the generated code to -static void generateLLVMBCImpl(llvm::TargetMachine &tm, - llvm::Module &llvmModule, - llvm::raw_pwrite_stream &os) { - // Set-up the pass manager - llvm::ModulePassManager mpm; +static llvm::OptimizationLevel +mapToLevel(const Fortran::frontend::CodeGenOptions &opts) { + switch (opts.OptimizationLevel) { + default: + llvm_unreachable("Invalid optimization level!"); + case 0: + return llvm::OptimizationLevel::O0; + case 1: + return llvm::OptimizationLevel::O1; + case 2: + return llvm::OptimizationLevel::O2; + case 3: + return llvm::OptimizationLevel::O3; + } +} + +void CodeGenAction::runOptimizationPipeline(llvm::raw_pwrite_stream &os) { + auto opts = getInstance().getInvocation().getCodeGenOpts(); + llvm::OptimizationLevel level = mapToLevel(opts); + + // Create the analysis managers. + llvm::LoopAnalysisManager lam; + llvm::FunctionAnalysisManager fam; + llvm::CGSCCAnalysisManager cgam; llvm::ModuleAnalysisManager mam; - llvm::PassBuilder pb(&tm); + + // Create the pass manager builder. + llvm::PassInstrumentationCallbacks pic; + llvm::PipelineTuningOptions pto; + llvm::Optional pgoOpt; + llvm::StandardInstrumentations si(opts.DebugPassManager); + si.registerCallbacks(pic, &fam); + llvm::PassBuilder pb(tm.get(), pto, pgoOpt, &pic); + + // Register all the basic analyses with the managers. pb.registerModuleAnalyses(mam); - mpm.addPass(llvm::BitcodeWriterPass(os)); + pb.registerCGSCCAnalyses(cgam); + pb.registerFunctionAnalyses(fam); + pb.registerLoopAnalyses(lam); + pb.crossRegisterProxies(lam, fam, cgam, mam); + + // Create the pass manager. 
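+  // At O0, buildO0DefaultPipeline schedules only the mandatory passes (such
+  // as the always-inliner); buildPerModuleDefaultPipeline builds the complete
+  // default optimization pipeline for the requested level.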
+ llvm::ModulePassManager mpm; + if (opts.OptimizationLevel == 0) + mpm = pb.buildO0DefaultPipeline(level, false); + else + mpm = pb.buildPerModuleDefaultPipeline(level); - // run the passes - mpm.run(llvmModule, mam); + if (action == BackendActionTy::Backend_EmitBC) + mpm.addPass(llvm::BitcodeWriterPass(os)); + + // Run the passes. + mpm.run(*llvmModule, mam); } void CodeGenAction::executeAction() { @@ -661,11 +697,14 @@ void CodeGenAction::executeAction() { return; } - // generate an LLVM module if it's not already present (it will already be + // Generate an LLVM module if it's not already present (it will already be // present if the input file is an LLVM IR/BC file). if (!llvmModule) generateLLVMIR(); + // Run LLVM's middle-end (i.e. the optimizer). + runOptimizationPipeline(*os); + if (action == BackendActionTy::Backend_EmitLL) { llvmModule->print(ci.isOutputStreamNull() ? *os : ci.getOutputStream(), /*AssemblyAnnotationWriter=*/nullptr); @@ -673,11 +712,14 @@ void CodeGenAction::executeAction() { } setUpTargetMachine(); + llvmModule->setDataLayout(tm->createDataLayout()); + if (action == BackendActionTy::Backend_EmitBC) { - generateLLVMBCImpl(*tm, *llvmModule, *os); + // This action has effectively been completed in runOptimizationPipeline. return; } + // Run LLVM's backend and generate either assembly or machine code if (action == BackendActionTy::Backend_EmitAssembly || action == BackendActionTy::Backend_EmitObj) { generateMachineCodeOrAssemblyImpl( diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 3ac788e0e6945f44816f9e69b2e85d2c08182be8..b7d180ed7320735d678a310c974f8f60db89bc1d 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -481,16 +481,20 @@ public: assert(sym.has() && "No host-association found"); const Fortran::semantics::Symbol &hsym = sym.GetUltimate(); - Fortran::lower::SymbolBox hsb = lookupSymbol(hsym); + Fortran::lower::SymbolBox hsb = lookupOneLevelUpSymbol(hsym); + assert(hsb && "Host symbol box not found"); fir::ExtendedValue hexv = getExtendedValue(hsb); - // 2) Create a copy that will mask the original. - createHostAssociateVarClone(sym); - Fortran::lower::SymbolBox sb = lookupSymbol(sym); + // 2) Fetch the copied one that will mask the original. + Fortran::lower::SymbolBox sb = shallowLookupSymbol(sym); + assert(sb && "Host-associated symbol box not found"); + assert(hsb.getAddr() != sb.getAddr() && + "Host and associated symbol boxes are the same"); fir::ExtendedValue exv = getExtendedValue(sb); // 3) Perform the assignment. - mlir::Location loc = genLocation(sym.name()); + builder->setInsertionPointAfter(fir::getBase(exv).getDefiningOp()); + mlir::Location loc = getCurrentLocation(); mlir::Type symType = genType(sym); if (auto seqTy = symType.dyn_cast()) { Fortran::lower::StatementContext stmtCtx; @@ -514,11 +518,13 @@ public: void collectSymbolSet( Fortran::lower::pft::Evaluation &eval, llvm::SetVector &symbolSet, - Fortran::semantics::Symbol::Flag flag) override final { + Fortran::semantics::Symbol::Flag flag, + bool isUltimateSymbol) override final { auto addToList = [&](const Fortran::semantics::Symbol &sym) { - const Fortran::semantics::Symbol &ultimate = sym.GetUltimate(); - if (ultimate.test(flag)) - symbolSet.insert(&ultimate); + const Fortran::semantics::Symbol &symbol = + isUltimateSymbol ? 
sym.GetUltimate() : sym; + if (symbol.test(flag)) + symbolSet.insert(&symbol); }; Fortran::lower::pft::visitAllSymbols(eval, addToList); } @@ -609,6 +615,15 @@ private: return {}; } + /// Find the symbol in one level up of symbol map such as for host-association + /// in OpenMP code or return null. + Fortran::lower::SymbolBox + lookupOneLevelUpSymbol(const Fortran::semantics::Symbol &sym) { + if (Fortran::lower::SymbolBox v = localSymbols.lookupOneLevelUpSymbol(sym)) + return v; + return {}; + } + /// Add the symbol to the local map and return `true`. If the symbol is /// already in the map and \p forced is `false`, the map is not updated. /// Instead the value `false` is returned. @@ -2839,10 +2854,10 @@ private: if (!funit.isMainProgram()) { const Fortran::semantics::Symbol &procSymbol = funit.getSubprogramSymbol(); - if (procSymbol.owner().IsSubmodule()) { + if (procSymbol.owner().IsSubmodule()) TODO(toLocation(), "support for submodules"); - return; - } + if (Fortran::semantics::IsSeparateModuleProcedureInterface(&procSymbol)) + TODO(toLocation(), "separate module procedure"); } setCurrentPosition(funit.getStartingSourceLoc()); for (int entryIndex = 0, last = funit.entryPointList.size(); diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp index d064fe59e8807848732a664afd51a731639f2ba2..20e7c33df5a6d4fca4f1fc34becb5c80852fb7fd 100644 --- a/flang/lib/Lower/ConvertExpr.cpp +++ b/flang/lib/Lower/ConvertExpr.cpp @@ -535,6 +535,27 @@ createBoxProcCharTuple(Fortran::lower::AbstractConverter &converter, boxProc, charLen); } +/// Given an optional fir.box, returns an fir.box that is the original one if +/// it is present and it otherwise an unallocated box. +/// Absent fir.box are implemented as a null pointer descriptor. Generated +/// code may need to unconditionally read a fir.box that can be absent. +/// This helper allows creating a fir.box that can be read in all cases +/// outside of a fir.if (isPresent) region. However, the usages of the value +/// read from such box should still only be done in a fir.if(isPresent). +static fir::ExtendedValue +absentBoxToUnallocatedBox(fir::FirOpBuilder &builder, mlir::Location loc, + const fir::ExtendedValue &exv, + mlir::Value isPresent) { + mlir::Value box = fir::getBase(exv); + mlir::Type boxType = box.getType(); + assert(boxType.isa() && "argument must be a fir.box"); + mlir::Value emptyBox = + fir::factory::createUnallocatedBox(builder, loc, boxType, llvm::None); + auto safeToReadBox = + builder.create(loc, isPresent, box, emptyBox); + return fir::substBase(exv, safeToReadBox); +} + // Helper to get the ultimate first symbol. This works around the fact that // symbol resolution in the front end doesn't always resolve a symbol to its // ultimate symbol but may leave placeholder indirections for use and host @@ -550,6 +571,16 @@ const Fortran::semantics::Symbol &getLastSym(const A &obj) { return obj.GetLastSymbol().GetUltimate(); } +static bool +isIntrinsicModuleProcRef(const Fortran::evaluate::ProcedureRef &procRef) { + const Fortran::semantics::Symbol *symbol = procRef.proc().GetSymbol(); + if (!symbol) + return false; + const Fortran::semantics::Symbol *module = + symbol->GetUltimate().owner().GetSymbol(); + return module && module->attrs().test(Fortran::semantics::Attr::INTRINSIC); +} + namespace { /// Lowering of Fortran::evaluate::Expr expressions @@ -1270,10 +1301,20 @@ public: // to "0xE2 0x82 0xAC" : UTF-8. 
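    // A conversion may thus change the number of bytes per character, so a
    // fresh buffer of the target kind is allocated below rather than reusing
    // the original storage.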
mlir::Value bufferSize = boxchar.getLen(); auto kindMap = builder.getKindMap(); - auto fromBits = kindMap.getCharacterBitsize( - fir::unwrapRefType(boxchar.getAddr().getType()) - .cast() - .getFKind()); + mlir::Value boxCharAddr = boxchar.getAddr(); + auto fromTy = boxCharAddr.getType(); + if (auto charTy = fromTy.dyn_cast()) { + // boxchar is a value, not a variable. Turn it into a temporary. + // As a value, it ought to have a constant LEN value. + assert(charTy.hasConstantLen() && "must have constant length"); + mlir::Value tmp = builder.createTemporary(loc, charTy); + builder.create(loc, boxCharAddr, tmp); + boxCharAddr = tmp; + } + auto fromBits = + kindMap.getCharacterBitsize(fir::unwrapRefType(fromTy) + .cast() + .getFKind()); auto toBits = kindMap.getCharacterBitsize( ty.cast().getFKind()); if (toBits < fromBits) { @@ -1285,7 +1326,7 @@ public: } auto dest = builder.create( loc, ty, mlir::ValueRange{bufferSize}); - builder.create(loc, boxchar.getAddr(), + builder.create(loc, boxCharAddr, boxchar.getLen(), dest); return fir::CharBoxValue{dest, boxchar.getLen()}; } else { @@ -2078,17 +2119,20 @@ public: fir::factory::getNonDeferredLengthParams(exv)); } - /// Generate a call to an intrinsic function. - ExtValue - genIntrinsicRef(const Fortran::evaluate::ProcedureRef &procRef, - const Fortran::evaluate::SpecificIntrinsic &intrinsic, - llvm::Optional resultType) { + /// Generate a call to a Fortran intrinsic or intrinsic module procedure. + ExtValue genIntrinsicRef( + const Fortran::evaluate::ProcedureRef &procRef, + llvm::Optional resultType, + llvm::Optional intrinsic = + llvm::None) { llvm::SmallVector operands; - llvm::StringRef name = intrinsic.name; + std::string name = + intrinsic ? intrinsic->name + : procRef.proc().GetSymbol()->GetUltimate().name().ToString(); mlir::Location loc = getLoc(); - if (Fortran::lower::intrinsicRequiresCustomOptionalHandling( - procRef, intrinsic, converter)) { + if (intrinsic && Fortran::lower::intrinsicRequiresCustomOptionalHandling( + procRef, *intrinsic, converter)) { using ExvAndPresence = std::pair>; llvm::SmallVector operands; auto prepareOptionalArg = [&](const Fortran::lower::SomeExpr &expr) { @@ -2101,7 +2145,7 @@ public: operands.emplace_back(genval(expr), llvm::None); }; Fortran::lower::prepareCustomIntrinsicArgument( - procRef, intrinsic, resultType, prepareOptionalArg, prepareOtherArg, + procRef, *intrinsic, resultType, prepareOptionalArg, prepareOtherArg, converter); auto getArgument = [&](std::size_t i) -> ExtValue { @@ -2120,10 +2164,9 @@ public: const Fortran::lower::IntrinsicArgumentLoweringRules *argLowering = Fortran::lower::getIntrinsicArgumentLowering(name); - for (const auto &[arg, dummy] : - llvm::zip(procRef.arguments(), - intrinsic.characteristics.value().dummyArguments)) { - auto *expr = Fortran::evaluate::UnwrapExpr(arg); + for (const auto &arg : llvm::enumerate(procRef.arguments())) { + auto *expr = + Fortran::evaluate::UnwrapExpr(arg.value()); if (!expr) { // Absent optional. operands.emplace_back(Fortran::lower::getAbsentIntrinsicArgument()); @@ -2136,8 +2179,7 @@ public: } // Ad-hoc argument lowering handling. 
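        // Note: rules are looked up by argument position rather than by dummy
        // name, so this also covers intrinsic module procedures, for which no
        // dummy-argument characteristics are available at this point.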
Fortran::lower::ArgLoweringRule argRules = - Fortran::lower::lowerIntrinsicArgumentAs(loc, *argLowering, - dummy.name); + Fortran::lower::lowerIntrinsicArgumentAs(*argLowering, arg.index()); if (argRules.handleDynamicOptional && Fortran::evaluate::MayBePassedAsAbsentOptional( *expr, converter.getFoldingContext())) { @@ -2183,13 +2225,6 @@ public: operands, stmtCtx); } - template - bool isCharacterType(const A &exp) { - if (auto type = exp.GetType()) - return type->category() == Fortran::common::TypeCategory::Character; - return false; - } - /// helper to detect statement functions static bool isStatementFunctionCall(const Fortran::evaluate::ProcedureRef &procRef) { @@ -2199,6 +2234,7 @@ public: return details->stmtFunction().has_value(); return false; } + /// Generate Statement function calls ExtValue genStmtFunctionRef(const Fortran::evaluate::ProcedureRef &procRef) { const Fortran::semantics::Symbol *symbol = procRef.proc().GetSymbol(); @@ -2683,13 +2719,16 @@ public: ExtValue genCopyIn(const ExtValue &actualArg, const Fortran::lower::CallerInterface::PassedEntity &arg, CopyOutPairs ©OutPairs, - llvm::Optional restrictCopyAtRuntime) { + llvm::Optional restrictCopyAtRuntime, + bool byValue) { + const bool doCopyOut = !byValue && arg.mayBeModifiedByCall(); + llvm::StringRef tempName = byValue ? ".copy" : ".copyinout"; if (!restrictCopyAtRuntime) { - ExtValue temp = genArrayTempFromMold(actualArg, ".copyinout"); + ExtValue temp = genArrayTempFromMold(actualArg, tempName); if (arg.mayBeReadByCall()) genArrayCopy(temp, actualArg); - copyOutPairs.emplace_back(CopyOutPair{ - actualArg, temp, arg.mayBeModifiedByCall(), restrictCopyAtRuntime}); + copyOutPairs.emplace_back( + CopyOutPair{actualArg, temp, doCopyOut, restrictCopyAtRuntime}); return temp; } // Otherwise, need to be careful to only copy-in if allowed at runtime. @@ -2701,7 +2740,7 @@ public: .genIfOp(loc, {addrType}, *restrictCopyAtRuntime, /*withElseRegion=*/true) .genThen([&]() { - auto temp = genArrayTempFromMold(actualArg, ".copyinout"); + auto temp = genArrayTempFromMold(actualArg, tempName); if (arg.mayBeReadByCall()) genArrayCopy(temp, actualArg); builder.create(loc, fir::getBase(temp)); @@ -2713,8 +2752,8 @@ public: .getResults()[0]; // Associate the temp address with actualArg lengths and extents. fir::ExtendedValue temp = fir::substBase(readIfBoxValue(actualArg), addr); - copyOutPairs.emplace_back(CopyOutPair{ - actualArg, temp, arg.mayBeModifiedByCall(), restrictCopyAtRuntime}); + copyOutPairs.emplace_back( + CopyOutPair{actualArg, temp, doCopyOut, restrictCopyAtRuntime}); return temp; } @@ -2775,7 +2814,8 @@ public: loc, builder.getI1Type(), actualArgBase); if (!actualArgBase.getType().isa()) return {actualArg, isPresent}; - ExtValue safeToReadBox; + ExtValue safeToReadBox = + absentBoxToUnallocatedBox(builder, loc, actualArg, isPresent); return {safeToReadBox, isPresent}; } @@ -2807,6 +2847,13 @@ public: Fortran::lower::getAdaptToByRefAttr(builder)}); } + template + bool isCharacterType(const A &exp) { + if (auto type = exp.GetType()) + return type->category() == Fortran::common::TypeCategory::Character; + return false; + } + /// Lower an actual argument that must be passed via an address. 
/// This generates of the copy-in/copy-out if the actual is not contiguous, or /// the creation of the temp if the actual is a variable and \p byValue is @@ -2847,8 +2894,7 @@ public: return actualArg; if (isArray) - return genCopyIn(actualArg, arg, copyOutPairs, - isPresent /*, byValue*/); + return genCopyIn(actualArg, arg, copyOutPairs, isPresent, byValue); // Scalars, create a temp, and use it conditionally at runtime if // the argument is present. ExtValue temp = @@ -2877,7 +2923,7 @@ public: ExtValue box = genBoxArg(expr); if (needsCopy) return genCopyIn(box, arg, copyOutPairs, - /*restrictCopyAtRuntime=*/llvm::None /*, byValue*/); + /*restrictCopyAtRuntime=*/llvm::None, byValue); // Contiguous: just use the box we created above! // This gets "unboxed" below, if needed. return box; @@ -2906,9 +2952,13 @@ public: if (isElementalProcWithArrayArgs(procRef)) fir::emitFatalError(loc, "trying to lower elemental procedure with array " "arguments as normal procedure"); + if (const Fortran::evaluate::SpecificIntrinsic *intrinsic = procRef.proc().GetSpecificIntrinsic()) - return genIntrinsicRef(procRef, *intrinsic, resultType); + return genIntrinsicRef(procRef, resultType, *intrinsic); + + if (isIntrinsicModuleProcRef(procRef)) + return genIntrinsicRef(procRef, resultType); if (isStatementFunctionCall(procRef)) return genStmtFunctionRef(procRef); @@ -2993,116 +3043,19 @@ public: mutableModifiedByCall.emplace_back(std::move(mutableBox)); continue; } - const bool actualArgIsVariable = Fortran::evaluate::IsVariable(*expr); - if (arg.passBy == PassBy::BaseAddressValueAttribute) { - mlir::Value temp; - if (isArray(*expr)) { - auto val = genBoxArg(*expr); - if (!actualArgIsVariable) - temp = getBase(val); - else { - ExtValue copy = genArrayTempFromMold(val, ".copy"); - genArrayCopy(copy, val); - temp = fir::getBase(copy); - } - } else { - mlir::Value val = fir::getBase(genval(*expr)); - temp = builder.createTemporary( - loc, val.getType(), - llvm::ArrayRef{ - Fortran::lower::getAdaptToByRefAttr(builder)}); - builder.create(loc, val, temp); - } - caller.placeInput(arg, temp); - continue; - } - if (arg.passBy == PassBy::BaseAddress || arg.passBy == PassBy::BoxChar) { - const bool actualIsSimplyContiguous = - !actualArgIsVariable || Fortran::evaluate::IsSimplyContiguous( - *expr, converter.getFoldingContext()); - auto argAddr = [&]() -> ExtValue { - ExtValue baseAddr; - if (actualArgIsVariable && arg.isOptional()) { - if (Fortran::evaluate::IsAllocatableOrPointerObject( - *expr, converter.getFoldingContext())) { - // Fortran 2018 15.5.2.12 point 1: If unallocated/disassociated, - // it is as if the argument was absent. The main care here is to - // not do a copy-in/copy-out because the temp address, even though - // pointing to a null size storage, would not be a nullptr and - // therefore the argument would not be considered absent on the - // callee side. Note: if wholeSymbol is optional, it cannot be - // absent as per 15.5.2.12 point 7. and 8. We rely on this to - // un-conditionally read the allocatable/pointer descriptor here. 
- if (actualIsSimplyContiguous) - return genBoxArg(*expr); - fir::MutableBoxValue mutableBox = genMutableBoxValue(*expr); - mlir::Value isAssociated = - fir::factory::genIsAllocatedOrAssociatedTest(builder, loc, - mutableBox); - fir::ExtendedValue actualExv = - fir::factory::genMutableBoxRead(builder, loc, mutableBox); - return genCopyIn(actualExv, arg, copyOutPairs, isAssociated); - } - if (const Fortran::semantics::Symbol *wholeSymbol = - Fortran::evaluate::UnwrapWholeSymbolOrComponentDataRef( - *expr)) - if (Fortran::semantics::IsOptional(*wholeSymbol)) { - ExtValue actualArg = gen(*expr); - mlir::Value actualArgBase = fir::getBase(actualArg); - if (!actualArgBase.getType().isa()) - return actualArg; - // Do not read wholeSymbol descriptor that may be a nullptr in - // case wholeSymbol is absent. - // Absent descriptor cannot be read. To avoid any issue in - // copy-in/copy-out, and when retrieving the address/length - // create an descriptor pointing to a null address here if the - // fir.box is absent. - mlir::Value isPresent = builder.create( - loc, builder.getI1Type(), actualArgBase); - mlir::Type boxType = actualArgBase.getType(); - mlir::Value emptyBox = fir::factory::createUnallocatedBox( - builder, loc, boxType, llvm::None); - auto safeToReadBox = builder.create( - loc, isPresent, actualArgBase, emptyBox); - fir::ExtendedValue safeToReadExv = - fir::substBase(actualArg, safeToReadBox); - if (actualIsSimplyContiguous) - return safeToReadExv; - return genCopyIn(safeToReadExv, arg, copyOutPairs, isPresent); - } - // Fall through: The actual argument can safely be - // copied-in/copied-out without any care if needed. - } - if (actualArgIsVariable && expr->Rank() > 0) { - ExtValue box = genBoxArg(*expr); - if (!actualIsSimplyContiguous) - return genCopyIn(box, arg, copyOutPairs, - /*restrictCopyAtRuntime=*/llvm::None); - // Contiguous: just use the box we created above! - // This gets "unboxed" below, if needed. - return box; - } - // Actual argument is a non optional/non pointer/non allocatable - // scalar. - if (actualArgIsVariable) - return genExtAddr(*expr); - // Actual argument is not a variable. Make sure a variable address is - // not passed. - return genTempExtAddr(*expr); - }(); - // Scalar and contiguous expressions may be lowered to a fir.box, - // either to account for potential polymorphism, or because lowering - // did not account for some contiguity hints. - // Here, polymorphism does not matter (an entity of the declared type - // is passed, not one of the dynamic type), and the expr is known to - // be simply contiguous, so it is safe to unbox it and pass the - // address without making a copy. 
- argAddr = readIfBoxValue(argAddr); - - if (arg.passBy == PassBy::BaseAddress) { + if (arg.passBy == PassBy::BaseAddress || arg.passBy == PassBy::BoxChar || + arg.passBy == PassBy::BaseAddressValueAttribute || + arg.passBy == PassBy::CharBoxValueAttribute) { + const bool byValue = arg.passBy == PassBy::BaseAddressValueAttribute || + arg.passBy == PassBy::CharBoxValueAttribute; + ExtValue argAddr = + prepareActualToBaseAddressLike(*expr, arg, copyOutPairs, byValue); + if (arg.passBy == PassBy::BaseAddress || + arg.passBy == PassBy::BaseAddressValueAttribute) { caller.placeInput(arg, fir::getBase(argAddr)); } else { - assert(arg.passBy == PassBy::BoxChar); + assert(arg.passBy == PassBy::BoxChar || + arg.passBy == PassBy::CharBoxValueAttribute); auto helper = fir::factory::CharacterExprHelper{builder, loc}; auto boxChar = argAddr.match( [&](const fir::CharBoxValue &x) { return helper.createEmbox(x); }, @@ -3156,7 +3109,7 @@ public: // Make sure a variable address is only passed if the expression is // actually a variable. mlir::Value box = - actualArgIsVariable + Fortran::evaluate::IsVariable(*expr) ? builder.createBox(loc, genBoxArg(*expr)) : builder.createBox(getLoc(), genTempExtAddr(*expr)); caller.placeInput(arg, box); @@ -3997,8 +3950,9 @@ public: mlir::Value oldInnerArg = modifyOp.getSequence(); std::size_t offset = explicitSpace->argPosition(oldInnerArg); explicitSpace->setInnerArg(offset, fir::getBase(lexv)); - fir::ExtendedValue exv = arrayModifyToExv( - builder, loc, *explicitSpace->getLhsLoad(0), modifyOp.getResult(0)); + fir::ExtendedValue exv = + arrayModifyToExv(builder, loc, explicitSpace->getLhsLoad(0).value(), + modifyOp.getResult(0)); genScalarUserDefinedAssignmentCall(builder, loc, userAssignment, exv, elementalExv); } else { @@ -4148,7 +4102,7 @@ private: mlir::Value origVal) { mlir::Value val = builder.createConvert(loc, eleTy, origVal); if (isBoundsSpec()) { - auto lbs = *lbounds; + auto lbs = lbounds.value(); if (lbs.size() > 0) { // Rebox the value with user-specified shift. auto shiftTy = fir::ShiftType::get(eleTy.getContext(), lbs.size()); @@ -4758,18 +4712,22 @@ private: return genarr(x); } - // A procedure reference to a Fortran elemental intrinsic procedure. + // A reference to a Fortran elemental intrinsic or intrinsic module procedure. CC genElementalIntrinsicProcRef( const Fortran::evaluate::ProcedureRef &procRef, llvm::Optional retTy, - const Fortran::evaluate::SpecificIntrinsic &intrinsic) { + llvm::Optional intrinsic = + llvm::None) { + llvm::SmallVector operands; - llvm::StringRef name = intrinsic.name; + std::string name = + intrinsic ? 
intrinsic->name + : procRef.proc().GetSymbol()->GetUltimate().name().ToString(); const Fortran::lower::IntrinsicArgumentLoweringRules *argLowering = Fortran::lower::getIntrinsicArgumentLowering(name); mlir::Location loc = getLoc(); - if (Fortran::lower::intrinsicRequiresCustomOptionalHandling( - procRef, intrinsic, converter)) { + if (intrinsic && Fortran::lower::intrinsicRequiresCustomOptionalHandling( + procRef, *intrinsic, converter)) { using CcPairT = std::pair>; llvm::SmallVector operands; auto prepareOptionalArg = [&](const Fortran::lower::SomeExpr &expr) { @@ -4792,11 +4750,10 @@ private: operands.emplace_back(genElementalArgument(expr), llvm::None); }; Fortran::lower::prepareCustomIntrinsicArgument( - procRef, intrinsic, retTy, prepareOptionalArg, prepareOtherArg, + procRef, *intrinsic, retTy, prepareOptionalArg, prepareOtherArg, converter); fir::FirOpBuilder *bldr = &converter.getFirOpBuilder(); - llvm::StringRef name = intrinsic.name; return [=](IterSpace iters) -> ExtValue { auto getArgument = [&](std::size_t i) -> ExtValue { return operands[i].first(iters); @@ -4810,11 +4767,9 @@ private: }; } /// Otherwise, pre-lower arguments and use intrinsic lowering utility. - for (const auto &[arg, dummy] : - llvm::zip(procRef.arguments(), - intrinsic.characteristics.value().dummyArguments)) { + for (const auto &arg : llvm::enumerate(procRef.arguments())) { const auto *expr = - Fortran::evaluate::UnwrapExpr(arg); + Fortran::evaluate::UnwrapExpr(arg.value()); if (!expr) { // Absent optional. operands.emplace_back([=](IterSpace) { return mlir::Value{}; }); @@ -4825,8 +4780,7 @@ private: } else { // Ad-hoc argument lowering handling. Fortran::lower::ArgLoweringRule argRules = - Fortran::lower::lowerIntrinsicArgumentAs(getLoc(), *argLowering, - dummy.name); + Fortran::lower::lowerIntrinsicArgumentAs(*argLowering, arg.index()); if (argRules.handleDynamicOptional && Fortran::evaluate::MayBePassedAsAbsentOptional( *expr, converter.getFoldingContext())) { @@ -5028,6 +4982,8 @@ private: // The intrinsic procedure is called once per element of the array. return genElementalIntrinsicProcRef(procRef, retTy, *intrin); } + if (isIntrinsicModuleProcRef(procRef)) + return genElementalIntrinsicProcRef(procRef, retTy); if (ScalarExprLowering::isStatementFunctionCall(procRef)) fir::emitFatalError(loc, "statement function cannot be elemental"); @@ -5044,12 +5000,12 @@ private: // Elide any implicit loop iters. return [=, &procRef](IterSpace) { return ScalarExprLowering{loc, converter, symMap, stmtCtx} - .genIntrinsicRef(procRef, *intrinsic, retTy); + .genIntrinsicRef(procRef, retTy, *intrinsic); }; } return genarr( ScalarExprLowering{loc, converter, symMap, stmtCtx}.genIntrinsicRef( - procRef, *intrinsic, retTy)); + procRef, retTy, *intrinsic)); } if (explicitSpaceIsActive() && procRef.Rank() == 0) { @@ -6413,7 +6369,7 @@ private: charLen = builder.createTemporary(loc, builder.getI64Type()); mlir::Value castLen = builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv)); - builder.create(loc, castLen, *charLen); + builder.create(loc, castLen, charLen.value()); } } stmtCtx.finalize(/*popScope=*/true); @@ -6427,7 +6383,7 @@ private: // Convert to extended value. 
if (fir::isa_char(seqTy.getEleTy())) { - auto len = builder.create(loc, *charLen); + auto len = builder.create(loc, charLen.value()); return {fir::CharArrayBoxValue{mem, len, extents}, /*needCopy=*/false}; } return {fir::ArrayBoxValue{mem, extents}, /*needCopy=*/false}; @@ -6495,7 +6451,7 @@ private: charLen = builder.createTemporary(loc, builder.getI64Type()); mlir::Value castLen = builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv)); - builder.create(loc, castLen, *charLen); + builder.create(loc, castLen, charLen.value()); } } mem = builder.createConvert(loc, fir::HeapType::get(resTy), mem); @@ -7057,7 +7013,8 @@ private: return genImplicitArrayAccess(x, components); } if (pathIsEmpty(components)) - return genAsScalar(x); + return components.substring ? genAsScalar(*components.substring) + : genAsScalar(x); mlir::Location loc = getLoc(); return [=](IterSpace) -> ExtValue { fir::emitFatalError(loc, "reached symbol with path"); diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp index 8baeafd6ae069e741c2d3323ae718a6602a4b584..849288b47b24f685e0d07aba537acd7edec24cdd 100644 --- a/flang/lib/Lower/IO.cpp +++ b/flang/lib/Lower/IO.cpp @@ -530,17 +530,17 @@ static mlir::func::FuncOp getInputFunc(mlir::Location loc, ? getIORuntimeFunc(loc, builder) : getIORuntimeFunc(loc, builder); if (auto ty = type.dyn_cast()) { - if (auto width = ty.getWidth(); width <= 32) + if (auto width = ty.getWidth(); width == 32) return getIORuntimeFunc(loc, builder); - else if (width <= 64) + else if (width == 64) return getIORuntimeFunc(loc, builder); } auto kindMap = fir::getKindMapping(builder.getModule()); if (auto ty = type.dyn_cast()) { auto width = kindMap.getRealBitsize(ty.getFKind()); - if (width <= 32) + if (width == 32) return getIORuntimeFunc(loc, builder); - else if (width <= 64) + else if (width == 64) return getIORuntimeFunc(loc, builder); } if (type.isa()) diff --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp index c061d593d7ce2be601653d048eaf3e030ecd5600..8b922e407db5ff8be47754ca88179bc65edd44da 100644 --- a/flang/lib/Lower/IntrinsicCall.cpp +++ b/flang/lib/Lower/IntrinsicCall.cpp @@ -43,9 +43,9 @@ #define PGMATH_DECLARE #include "flang/Evaluate/pgmath.h.inc" -/// This file implements lowering of Fortran intrinsic procedures. -/// Intrinsics are lowered to a mix of FIR and MLIR operations as -/// well as call to runtime functions or LLVM intrinsics. +/// This file implements lowering of Fortran intrinsic procedures and Fortran +/// intrinsic module procedures. A call may be inlined with a mix of FIR and +/// MLIR operations, or as a call to a runtime function or LLVM intrinsic. /// Lowering of intrinsic procedure calls is based on a map that associates /// Fortran intrinsic generic names to FIR generator functions. 
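 /// For example, the handler table below maps the generic name
 /// "ieee_is_finite" to IntrinsicLibrary::genIeeeIsFinite, which emits an
 /// inline FIR/MLIR implementation of that intrinsic module procedure.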
@@ -493,6 +493,10 @@ struct IntrinsicLibrary { mlir::Value genIbits(mlir::Type, llvm::ArrayRef); mlir::Value genIbset(mlir::Type, llvm::ArrayRef); fir::ExtendedValue genIchar(mlir::Type, llvm::ArrayRef); + mlir::Value genIeeeIsFinite(mlir::Type, llvm::ArrayRef); + template + fir::ExtendedValue genIeeeTypeCompare(mlir::Type, + llvm::ArrayRef); mlir::Value genIeor(mlir::Type, llvm::ArrayRef); fir::ExtendedValue genIndex(mlir::Type, llvm::ArrayRef); mlir::Value genIor(mlir::Type, llvm::ArrayRef); @@ -758,6 +762,11 @@ static constexpr IntrinsicHandler handlers[]{ {"ibits", &I::genIbits}, {"ibset", &I::genIbset}, {"ichar", &I::genIchar}, + {"ieee_class_eq", &I::genIeeeTypeCompare}, + {"ieee_class_ne", &I::genIeeeTypeCompare}, + {"ieee_is_finite", &I::genIeeeIsFinite}, + {"ieee_round_eq", &I::genIeeeTypeCompare}, + {"ieee_round_ne", &I::genIeeeTypeCompare}, {"ieor", &I::genIeor}, {"index", &I::genIndex, @@ -1274,7 +1283,7 @@ static mlir::func::FuncOp getRuntimeFunction(mlir::Location loc, llvm::StringRef name, mlir::FunctionType funcType) { const RuntimeFunction *bestNearMatch = nullptr; - FunctionDistance bestMatchDistance{}; + FunctionDistance bestMatchDistance; mlir::func::FuncOp match; using RtMap = Fortran::common::StaticMultimapView; static constexpr RtMap pgmathF(pgmathFast); @@ -1395,7 +1404,11 @@ mlir::Value toValue(const fir::ExtendedValue &val, fir::FirOpBuilder &builder, mlir::Location loc) { if (const fir::CharBoxValue *charBox = val.getCharBox()) { mlir::Value buffer = charBox->getBuffer(); - if (buffer.getType().isa()) + auto buffTy = buffer.getType(); + if (buffTy.isa()) + fir::emitFatalError( + loc, "A character's buffer type cannot be a function type."); + if (buffTy.isa()) return buffer; return fir::factory::CharacterExprHelper{builder, loc}.createEmboxChar( buffer, charBox->getLen()); @@ -1410,9 +1423,33 @@ mlir::Value toValue(const fir::ExtendedValue &val, fir::FirOpBuilder &builder, // IntrinsicLibrary //===----------------------------------------------------------------------===// -/// Emit a TODO error message for as yet unimplemented intrinsics. -static void crashOnMissingIntrinsic(mlir::Location loc, llvm::StringRef name) { - TODO(loc, "missing intrinsic lowering: " + llvm::Twine(name)); +static bool isIntrinsicModuleProcedure(llvm::StringRef name) { + return name.startswith("c_") || name.startswith("compiler_") || + name.startswith("ieee_"); +} + +/// Return the generic name of an intrinsic module procedure specific name. +/// Remove any "__builtin_" prefix, and any specific suffix of the form +/// {_[ail]?[0-9]+}*, such as _1 or _a4. +llvm::StringRef genericName(llvm::StringRef specificName) { + const std::string builtin = "__builtin_"; + llvm::StringRef name = specificName.startswith(builtin) + ? specificName.drop_front(builtin.size()) + : specificName; + size_t size = name.size(); + if (isIntrinsicModuleProcedure(name)) + while (isdigit(name[size - 1])) + while (name[--size] != '_') + ; + return name.drop_back(name.size() - size); +} + +/// Generate a TODO error message for an as yet unimplemented intrinsic. 
+void crashOnMissingIntrinsic(mlir::Location loc, llvm::StringRef name) { + if (isIntrinsicModuleProcedure(name)) + TODO(loc, "intrinsic module procedure: " + llvm::Twine(name)); + else + TODO(loc, "intrinsic: " + llvm::Twine(name)); } template @@ -1502,9 +1539,10 @@ invokeHandler(IntrinsicLibrary::SubroutineGenerator generator, } fir::ExtendedValue -IntrinsicLibrary::genIntrinsicCall(llvm::StringRef name, +IntrinsicLibrary::genIntrinsicCall(llvm::StringRef specificName, llvm::Optional resultType, llvm::ArrayRef args) { + llvm::StringRef name = genericName(specificName); if (const IntrinsicHandler *handler = findIntrinsicHandler(name)) { bool outline = handler->outline || outlineAllIntrinsics; return std::visit( @@ -1695,10 +1733,10 @@ IntrinsicLibrary::getRuntimeCallGenerator(llvm::StringRef name, mlir::func::FuncOp funcOp = getRuntimeFunction(loc, builder, name, soughtFuncType); if (!funcOp) { - std::string buffer("not yet implemented: missing intrinsic lowering: "); - llvm::raw_string_ostream sstream(buffer); - sstream << name << "\nrequested type was: " << soughtFuncType << '\n'; - fir::emitFatalError(loc, buffer); + std::string nameAndType; + llvm::raw_string_ostream sstream(nameAndType); + sstream << name << "\nrequested type: " << soughtFuncType; + crashOnMissingIntrinsic(loc, nameAndType); } mlir::FunctionType actualFuncType = funcOp.getFunctionType(); @@ -2523,7 +2561,9 @@ void IntrinsicLibrary::genGetEnvironmentVariable( mlir::Value IntrinsicLibrary::genIand(mlir::Type resultType, llvm::ArrayRef args) { assert(args.size() == 2); - return builder.create(loc, args[0], args[1]); + auto arg0 = builder.createConvert(loc, resultType, args[0]); + auto arg1 = builder.createConvert(loc, resultType, args[1]); + return builder.create(loc, arg0, arg1); } // IBCLR @@ -2621,6 +2661,67 @@ IntrinsicLibrary::genIchar(mlir::Type resultType, return builder.create(loc, resultType, code); } +// IEEE_CLASS_TYPE OPERATOR(==), OPERATOR(/=) +// IEEE_ROUND_TYPE OPERATOR(==), OPERATOR(/=) +template +fir::ExtendedValue +IntrinsicLibrary::genIeeeTypeCompare(mlir::Type resultType, + llvm::ArrayRef args) { + assert(args.size() == 2); + mlir::Value arg0 = fir::getBase(args[0]); + mlir::Value arg1 = fir::getBase(args[1]); + auto recType = + fir::unwrapPassByRefType(arg0.getType()).dyn_cast(); + assert(recType.getTypeList().size() == 1 && "expected exactly one component"); + auto [fieldName, fieldType] = recType.getTypeList().front(); + mlir::Type fieldIndexType = fir::FieldType::get(recType.getContext()); + mlir::Value field = builder.create( + loc, fieldIndexType, fieldName, recType, fir::getTypeParams(arg0)); + mlir::Value left = builder.create( + loc, fieldType, + builder.create(loc, builder.getRefType(fieldType), + arg0, field)); + mlir::Value right = builder.create( + loc, fieldType, + builder.create(loc, builder.getRefType(fieldType), + arg1, field)); + return builder.create(loc, pred, left, right); +} + +// IEEE_IS_FINITE +mlir::Value +IntrinsicLibrary::genIeeeIsFinite(mlir::Type resultType, + llvm::ArrayRef args) { + // IEEE_IS_FINITE(X) is true iff exponent(X) is the max exponent of kind(X). + assert(args.size() == 1); + mlir::Value floatVal = fir::getBase(args[0]); + mlir::FloatType floatType = floatVal.getType().dyn_cast(); + int floatBits = floatType.getWidth(); + mlir::Type intType = builder.getIntegerType( + floatType.isa() ? 
128 : floatBits); + mlir::Value intVal = + builder.create(loc, intType, floatVal); + int significandBits; + if (floatType.isa()) + significandBits = 23; + else if (floatType.isa()) + significandBits = 52; + else // problems elsewhere for other kinds + TODO(loc, "intrinsic module procedure: ieee_is_finite"); + mlir::Value significand = + builder.createIntegerConstant(loc, intType, significandBits); + int exponentBits = floatBits - 1 - significandBits; + mlir::Value maxExponent = + builder.createIntegerConstant(loc, intType, (1 << exponentBits) - 1); + mlir::Value exponent = genIbits( + intType, {intVal, significand, + builder.createIntegerConstant(loc, intType, exponentBits)}); + return builder.createConvert( + loc, resultType, + builder.create(loc, mlir::arith::CmpIPredicate::ne, + exponent, maxExponent)); +} + // IEOR mlir::Value IntrinsicLibrary::genIeor(mlir::Type resultType, llvm::ArrayRef args) { @@ -2811,7 +2912,7 @@ IntrinsicLibrary::genLenTrim(mlir::Type resultType, // LGE, LGT, LLE, LLT template fir::ExtendedValue -IntrinsicLibrary::genCharacterCompare(mlir::Type type, +IntrinsicLibrary::genCharacterCompare(mlir::Type resultType, llvm::ArrayRef args) { assert(args.size() == 2); return fir::runtime::genCharCompare( @@ -2852,13 +2953,20 @@ fir::ExtendedValue IntrinsicLibrary::genMerge(mlir::Type, llvm::ArrayRef args) { assert(args.size() == 3); - mlir::Value arg0 = fir::getBase(args[0]); - mlir::Value arg1 = fir::getBase(args[1]); - mlir::Value arg2 = fir::getBase(args[2]); - mlir::Type type0 = fir::unwrapRefType(arg0.getType()); + mlir::Value tsource = fir::getBase(args[0]); + mlir::Value fsource = fir::getBase(args[1]); + mlir::Value rawMask = fir::getBase(args[2]); + mlir::Type type0 = fir::unwrapRefType(tsource.getType()); bool isCharRslt = fir::isa_char(type0); // result is same as first argument - mlir::Value mask = builder.createConvert(loc, builder.getI1Type(), arg2); - auto rslt = builder.create(loc, mask, arg0, arg1); + mlir::Value mask = builder.createConvert(loc, builder.getI1Type(), rawMask); + // FSOURCE has the same type as TSOURCE, but they may not have the same MLIR + // types (one can have dynamic length while the other has constant lengths, + // or one may be a fir.logical<> while the other is an i1). Insert a cast to + // fulfill mlir::SelectOp constraint that the MLIR types must be the same. + mlir::Value fsourceCast = + builder.createConvert(loc, tsource.getType(), fsource); + auto rslt = + builder.create(loc, mask, tsource, fsourceCast); if (isCharRslt) { // Need a CharBoxValue for character results const fir::CharBoxValue *charBox = args[0].getCharBox(); @@ -3361,6 +3469,56 @@ static mlir::Value computeLBOUND(fir::FirOpBuilder &builder, mlir::Location loc, return builder.create(loc, dimIsEmpty, one, lb); } +/// Create a fir.box to be passed to the LBOUND runtime. +/// This ensure that local lower bounds of assumed shape are propagated and that +/// a fir.box with equivalent LBOUNDs but an explicit shape is created for +/// assumed size arrays to avoid undefined behaviors in codegen or the runtime. +static mlir::Value createBoxForLBOUND(mlir::Location loc, + fir::FirOpBuilder &builder, + const fir::ExtendedValue &array) { + if (!array.isAssumedSize()) + return array.match( + [&](const fir::BoxValue &boxValue) -> mlir::Value { + // This entity is mapped to a fir.box that may not contain the local + // lower bound information if it is a dummy. Rebox it with the local + // shape information. 
 // IEOR
 mlir::Value IntrinsicLibrary::genIeor(mlir::Type resultType,
                                       llvm::ArrayRef<mlir::Value> args) {
@@ -2811,7 +2912,7 @@ IntrinsicLibrary::genLenTrim(mlir::Type resultType,
 // LGE, LGT, LLE, LLT
 template <mlir::arith::CmpIPredicate pred>
 fir::ExtendedValue
-IntrinsicLibrary::genCharacterCompare(mlir::Type type,
+IntrinsicLibrary::genCharacterCompare(mlir::Type resultType,
                                       llvm::ArrayRef<fir::ExtendedValue> args) {
   assert(args.size() == 2);
   return fir::runtime::genCharCompare(
@@ -2852,13 +2953,20 @@ fir::ExtendedValue
 IntrinsicLibrary::genMerge(mlir::Type,
                            llvm::ArrayRef<fir::ExtendedValue> args) {
   assert(args.size() == 3);
-  mlir::Value arg0 = fir::getBase(args[0]);
-  mlir::Value arg1 = fir::getBase(args[1]);
-  mlir::Value arg2 = fir::getBase(args[2]);
-  mlir::Type type0 = fir::unwrapRefType(arg0.getType());
+  mlir::Value tsource = fir::getBase(args[0]);
+  mlir::Value fsource = fir::getBase(args[1]);
+  mlir::Value rawMask = fir::getBase(args[2]);
+  mlir::Type type0 = fir::unwrapRefType(tsource.getType());
   bool isCharRslt = fir::isa_char(type0); // result is same as first argument
-  mlir::Value mask = builder.createConvert(loc, builder.getI1Type(), arg2);
-  auto rslt = builder.create<mlir::arith::SelectOp>(loc, mask, arg0, arg1);
+  mlir::Value mask = builder.createConvert(loc, builder.getI1Type(), rawMask);
+  // FSOURCE has the same type as TSOURCE, but they may not have the same MLIR
+  // types (one can have dynamic length while the other has constant lengths,
+  // or one may be a fir.logical<> while the other is an i1). Insert a cast to
+  // fulfill mlir::SelectOp constraint that the MLIR types must be the same.
+  mlir::Value fsourceCast =
+      builder.createConvert(loc, tsource.getType(), fsource);
+  auto rslt =
+      builder.create<mlir::arith::SelectOp>(loc, mask, tsource, fsourceCast);
   if (isCharRslt) {
     // Need a CharBoxValue for character results
     const fir::CharBoxValue *charBox = args[0].getCharBox();
@@ -3361,6 +3469,56 @@ static mlir::Value computeLBOUND(fir::FirOpBuilder &builder, mlir::Location loc,
   return builder.create<mlir::arith::SelectOp>(loc, dimIsEmpty, one, lb);
 }
 
+/// Create a fir.box to be passed to the LBOUND runtime.
+/// This ensures that local lower bounds of assumed-shape arrays are propagated
+/// and that a fir.box with equivalent LBOUNDs but an explicit shape is created
+/// for assumed size arrays to avoid undefined behaviors in codegen or the
+/// runtime.
+static mlir::Value createBoxForLBOUND(mlir::Location loc,
+                                      fir::FirOpBuilder &builder,
+                                      const fir::ExtendedValue &array) {
+  if (!array.isAssumedSize())
+    return array.match(
+        [&](const fir::BoxValue &boxValue) -> mlir::Value {
+          // This entity is mapped to a fir.box that may not contain the local
+          // lower bound information if it is a dummy. Rebox it with the local
+          // shape information.
+          mlir::Value localShape = builder.createShape(loc, array);
+          mlir::Value oldBox = boxValue.getAddr();
+          return builder.create<fir::ReboxOp>(loc, oldBox.getType(), oldBox,
+                                              localShape,
+                                              /*slice=*/mlir::Value{});
+        },
+        [&](const auto &) -> mlir::Value {
+          // This is a pointer/allocatable, or an entity not yet tracked with
+          // a fir.box. For pointer/allocatable, createBox will forward the
+          // descriptor that contains the correct lower bound information. For
+          // other entities, a new fir.box will be made with the local lower
+          // bounds.
+          return builder.createBox(loc, array);
+        });
+  // Assumed size arrays are not meant to be emboxed: the undefined last
+  // extent cannot safely be understood by the runtime/codegen, which would
+  // consider that the dimension is empty and that the related LBOUND value
+  // must be one. Pretend that the related extent is one to get the correct
+  // LBOUND value.
+  llvm::SmallVector<mlir::Value> shape =
+      fir::factory::getExtents(loc, builder, array);
+  assert(!shape.empty() && "assumed size must have at least one dimension");
+  shape.back() = builder.createIntegerConstant(loc, builder.getIndexType(), 1);
+  auto safeToEmbox = array.match(
+      [&](const fir::CharArrayBoxValue &x) -> fir::ExtendedValue {
+        return fir::CharArrayBoxValue{x.getAddr(), x.getLen(), shape,
+                                      x.getLBounds()};
+      },
+      [&](const fir::ArrayBoxValue &x) -> fir::ExtendedValue {
+        return fir::ArrayBoxValue{x.getAddr(), shape, x.getLBounds()};
+      },
+      [&](const auto &) -> fir::ExtendedValue {
+        fir::emitFatalError(loc, "not an assumed size array");
+      });
+  return builder.createBox(loc, safeToEmbox);
+}
+
 // LBOUND
 fir::ExtendedValue
 IntrinsicLibrary::genLbound(mlir::Type resultType,
@@ -3411,25 +3569,7 @@ IntrinsicLibrary::genLbound(mlir::Type resultType,
     return builder.createConvert(loc, resultType, lb);
   }
 
-  mlir::Value box = array.match(
-      [&](const fir::BoxValue &boxValue) -> mlir::Value {
-        // This entity is mapped to a fir.box that may not contain the local
-        // lower bound information if it is a dummy. Rebox it with the local
-        // shape information.
-        mlir::Value localShape = builder.createShape(loc, array);
-        mlir::Value oldBox = boxValue.getAddr();
-        return builder.create<fir::ReboxOp>(
-            loc, oldBox.getType(), oldBox, localShape, /*slice=*/mlir::Value{});
-      },
-      [&](const auto &) -> mlir::Value {
-        // This a pointer/allocatable, or an entity not yet tracked with a
-        // fir.box. For pointer/allocatable, createBox will forward the
-        // descriptor that contains the correct lower bound information. For
-        // other entities, a new fir.box will be made with the local lower
-        // bounds.
-        return builder.createBox(loc, array);
-      });
-
+  fir::ExtendedValue box = createBoxForLBOUND(loc, builder, array);
   return builder.createConvert(
       loc, resultType,
       fir::runtime::genLboundDim(builder, loc, fir::getBase(box), dim));
@@ -3850,15 +3990,11 @@ Fortran::lower::getIntrinsicArgumentLowering(llvm::StringRef intrinsicName) {
 
 /// Return how argument \p argName should be lowered given the rules for the
 /// intrinsic function.
Fortran::lower::ArgLoweringRule Fortran::lower::lowerIntrinsicArgumentAs( - mlir::Location loc, const IntrinsicArgumentLoweringRules &rules, - llvm::StringRef argName) { - for (const IntrinsicDummyArgument &arg : rules.args) { - if (arg.name && arg.name == argName) - return {arg.lowerAs, arg.handleDynamicOptional}; - } - fir::emitFatalError( - loc, "internal: unknown intrinsic argument name in lowering '" + argName + - "'"); + const IntrinsicArgumentLoweringRules &rules, unsigned position) { + assert(position < sizeof(rules.args) / sizeof(decltype(*rules.args)) && + "invalid argument"); + return {rules.args[position].lowerAs, + rules.args[position].handleDynamicOptional}; } //===----------------------------------------------------------------------===// diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp index 533680e967db2c5df724b69006503031c2bd7bac..6172cac852a9502d64351d637ea90bad6e262183 100644 --- a/flang/lib/Lower/OpenACC.cpp +++ b/flang/lib/Lower/OpenACC.cpp @@ -35,20 +35,30 @@ getDesignatorNameIfDataRef(const Fortran::parser::Designator &designator) { static void genObjectList(const Fortran::parser::AccObjectList &objectList, Fortran::lower::AbstractConverter &converter, llvm::SmallVectorImpl &operands) { + auto addOperands = [&](Fortran::lower::SymbolRef sym) { + const auto variable = converter.getSymbolAddress(sym); + // TODO: Might need revisiting to handle for non-shared clauses + if (variable) { + operands.push_back(variable); + } else { + if (const auto *details = + sym->detailsIf()) + operands.push_back(converter.getSymbolAddress(details->symbol())); + } + }; + for (const auto &accObject : objectList.v) { - std::visit( - Fortran::common::visitors{ - [&](const Fortran::parser::Designator &designator) { - if (const auto *name = getDesignatorNameIfDataRef(designator)) { - const auto variable = converter.getSymbolAddress(*name->symbol); - operands.push_back(variable); - } - }, - [&](const Fortran::parser::Name &name) { - const auto variable = converter.getSymbolAddress(*name.symbol); - operands.push_back(variable); - }}, - accObject.u); + std::visit(Fortran::common::visitors{ + [&](const Fortran::parser::Designator &designator) { + if (const auto *name = + getDesignatorNameIfDataRef(designator)) { + addOperands(*name->symbol); + } + }, + [&](const Fortran::parser::Name &name) { + addOperands(*name.symbol); + }}, + accObject.u); } } @@ -98,7 +108,7 @@ createRegionOp(fir::FirOpBuilder &builder, mlir::Location loc, llvm::ArrayRef argTy; Op op = builder.create(loc, argTy, operands); builder.createBlock(&op.getRegion()); - auto &block = op.getRegion().back(); + mlir::Block &block = op.getRegion().back(); builder.setInsertionPointToStart(&block); builder.create(loc); @@ -194,161 +204,166 @@ static void genWaitClause(Fortran::lower::AbstractConverter &converter, } } -static void genACC(Fortran::lower::AbstractConverter &converter, - Fortran::lower::pft::Evaluation &eval, - const Fortran::parser::OpenACCLoopConstruct &loopConstruct) { +static mlir::acc::LoopOp +createLoopOp(Fortran::lower::AbstractConverter &converter, + const Fortran::parser::AccClauseList &accClauseList) { + fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + mlir::Location currentLocation = converter.getCurrentLocation(); Fortran::lower::StatementContext stmtCtx; - const auto &beginLoopDirective = - std::get(loopConstruct.t); - const auto &loopDirective = - std::get(beginLoopDirective.t); - if (loopDirective.v == llvm::acc::ACCD_loop) { - auto &firOpBuilder = 
converter.getFirOpBuilder(); - auto currentLocation = converter.getCurrentLocation(); + mlir::Value workerNum; + mlir::Value vectorNum; + mlir::Value gangNum; + mlir::Value gangStatic; + llvm::SmallVector tileOperands, privateOperands, + reductionOperands; + std::int64_t executionMapping = mlir::acc::OpenACCExecMapping::NONE; - // Add attribute extracted from clauses. - const auto &accClauseList = - std::get(beginLoopDirective.t); - - mlir::Value workerNum; - mlir::Value vectorLength; - mlir::Value gangNum; - mlir::Value gangStatic; - llvm::SmallVector tileOperands, privateOperands, - reductionOperands; - std::int64_t executionMapping = mlir::acc::OpenACCExecMapping::NONE; - - // Lower clauses values mapped to operands. - for (const auto &clause : accClauseList.v) { - if (const auto *gangClause = - std::get_if(&clause.u)) { - if (gangClause->v) { - const Fortran::parser::AccGangArgument &x = *gangClause->v; - if (const auto &gangNumValue = - std::get>( - x.t)) { - gangNum = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(gangNumValue.value()), stmtCtx)); - } - if (const auto &gangStaticValue = - std::get>(x.t)) { - const auto &expr = - std::get>( - gangStaticValue.value().t); - if (expr) { - gangStatic = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(*expr), stmtCtx)); - } else { - // * was passed as value and will be represented as a -1 constant - // integer. - gangStatic = firOpBuilder.createIntegerConstant( - currentLocation, firOpBuilder.getIntegerType(32), - /* STAR */ -1); - } - } - } - executionMapping |= mlir::acc::OpenACCExecMapping::GANG; - } else if (const auto *workerClause = - std::get_if( - &clause.u)) { - if (workerClause->v) { - workerNum = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(*workerClause->v), stmtCtx)); - } - executionMapping |= mlir::acc::OpenACCExecMapping::WORKER; - } else if (const auto *vectorClause = - std::get_if( - &clause.u)) { - if (vectorClause->v) { - vectorLength = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(*vectorClause->v), stmtCtx)); + for (const Fortran::parser::AccClause &clause : accClauseList.v) { + if (const auto *gangClause = + std::get_if(&clause.u)) { + if (gangClause->v) { + const Fortran::parser::AccGangArgument &x = *gangClause->v; + if (const auto &gangNumValue = + std::get>(x.t)) { + gangNum = fir::getBase(converter.genExprValue( + *Fortran::semantics::GetExpr(gangNumValue.value()), stmtCtx)); } - executionMapping |= mlir::acc::OpenACCExecMapping::VECTOR; - } else if (const auto *tileClause = - std::get_if(&clause.u)) { - const Fortran::parser::AccTileExprList &accTileExprList = tileClause->v; - for (const auto &accTileExpr : accTileExprList.v) { + if (const auto &gangStaticValue = + std::get>(x.t)) { const auto &expr = - std::get>( - accTileExpr.t); + std::get>( + gangStaticValue.value().t); if (expr) { - tileOperands.push_back(fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(*expr), stmtCtx))); + gangStatic = fir::getBase(converter.genExprValue( + *Fortran::semantics::GetExpr(*expr), stmtCtx)); } else { - // * was passed as value and will be represented as a -1 constant - // integer. - mlir::Value tileStar = firOpBuilder.createIntegerConstant( - currentLocation, firOpBuilder.getIntegerType(32), - /* STAR */ -1); - tileOperands.push_back(tileStar); + // * was passed as value and will be represented as a special + // constant. 
+ gangStatic = firOpBuilder.createIntegerConstant( + currentLocation, firOpBuilder.getIndexType(), starCst); } } - } else if (const auto *privateClause = - std::get_if( - &clause.u)) { - genObjectList(privateClause->v, converter, privateOperands); } - // Reduction clause is left out for the moment as the clause will probably - // end up having its own operation. + executionMapping |= mlir::acc::OpenACCExecMapping::GANG; + } else if (const auto *workerClause = + std::get_if(&clause.u)) { + if (workerClause->v) { + workerNum = fir::getBase(converter.genExprValue( + *Fortran::semantics::GetExpr(*workerClause->v), stmtCtx)); + } + executionMapping |= mlir::acc::OpenACCExecMapping::WORKER; + } else if (const auto *vectorClause = + std::get_if(&clause.u)) { + if (vectorClause->v) { + vectorNum = fir::getBase(converter.genExprValue( + *Fortran::semantics::GetExpr(*vectorClause->v), stmtCtx)); + } + executionMapping |= mlir::acc::OpenACCExecMapping::VECTOR; + } else if (const auto *tileClause = + std::get_if(&clause.u)) { + const Fortran::parser::AccTileExprList &accTileExprList = tileClause->v; + for (const auto &accTileExpr : accTileExprList.v) { + const auto &expr = + std::get>( + accTileExpr.t); + if (expr) { + tileOperands.push_back(fir::getBase(converter.genExprValue( + *Fortran::semantics::GetExpr(*expr), stmtCtx))); + } else { + // * was passed as value and will be represented as a -1 constant + // integer. + mlir::Value tileStar = firOpBuilder.createIntegerConstant( + currentLocation, firOpBuilder.getIntegerType(32), + /* STAR */ -1); + tileOperands.push_back(tileStar); + } + } + } else if (const auto *privateClause = + std::get_if( + &clause.u)) { + genObjectList(privateClause->v, converter, privateOperands); } + // Reduction clause is left out for the moment as the clause will probably + // end up having its own operation. + } - // Prepare the operand segement size attribute and the operands value range. - llvm::SmallVector operands; - llvm::SmallVector operandSegments; - addOperand(operands, operandSegments, gangNum); - addOperand(operands, operandSegments, gangStatic); - addOperand(operands, operandSegments, workerNum); - addOperand(operands, operandSegments, vectorLength); - addOperands(operands, operandSegments, tileOperands); - addOperands(operands, operandSegments, privateOperands); - addOperands(operands, operandSegments, reductionOperands); - - auto loopOp = createRegionOp( - firOpBuilder, currentLocation, operands, operandSegments); - - loopOp->setAttr(mlir::acc::LoopOp::getExecutionMappingAttrName(), - firOpBuilder.getI64IntegerAttr(executionMapping)); - - // Lower clauses mapped to attributes - for (const auto &clause : accClauseList.v) { - if (const auto *collapseClause = - std::get_if(&clause.u)) { - const auto *expr = Fortran::semantics::GetExpr(collapseClause->v); - const auto collapseValue = Fortran::evaluate::ToInt64(*expr); - if (collapseValue) { - loopOp->setAttr(mlir::acc::LoopOp::getCollapseAttrName(), - firOpBuilder.getI64IntegerAttr(*collapseValue)); - } - } else if (std::get_if(&clause.u)) { - loopOp->setAttr(mlir::acc::LoopOp::getSeqAttrName(), - firOpBuilder.getUnitAttr()); - } else if (std::get_if( - &clause.u)) { - loopOp->setAttr(mlir::acc::LoopOp::getIndependentAttrName(), - firOpBuilder.getUnitAttr()); - } else if (std::get_if(&clause.u)) { - loopOp->setAttr(mlir::acc::LoopOp::getAutoAttrName(), - firOpBuilder.getUnitAttr()); + // Prepare the operand segement size attribute and the operands value range. 
+ llvm::SmallVector operands; + llvm::SmallVector operandSegments; + addOperand(operands, operandSegments, gangNum); + addOperand(operands, operandSegments, gangStatic); + addOperand(operands, operandSegments, workerNum); + addOperand(operands, operandSegments, vectorNum); + addOperands(operands, operandSegments, tileOperands); + addOperands(operands, operandSegments, privateOperands); + addOperands(operands, operandSegments, reductionOperands); + + auto loopOp = createRegionOp( + firOpBuilder, currentLocation, operands, operandSegments); + + loopOp->setAttr(mlir::acc::LoopOp::getExecutionMappingAttrName(), + firOpBuilder.getI64IntegerAttr(executionMapping)); + + // Lower clauses mapped to attributes + for (const Fortran::parser::AccClause &clause : accClauseList.v) { + if (const auto *collapseClause = + std::get_if(&clause.u)) { + const auto *expr = Fortran::semantics::GetExpr(collapseClause->v); + const std::optional collapseValue = + Fortran::evaluate::ToInt64(*expr); + if (collapseValue) { + loopOp->setAttr(mlir::acc::LoopOp::getCollapseAttrName(), + firOpBuilder.getI64IntegerAttr(*collapseValue)); } + } else if (std::get_if(&clause.u)) { + loopOp->setAttr(mlir::acc::LoopOp::getSeqAttrName(), + firOpBuilder.getUnitAttr()); + } else if (std::get_if( + &clause.u)) { + loopOp->setAttr(mlir::acc::LoopOp::getIndependentAttrName(), + firOpBuilder.getUnitAttr()); + } else if (std::get_if(&clause.u)) { + loopOp->setAttr(mlir::acc::LoopOp::getAutoAttrName(), + firOpBuilder.getUnitAttr()); } } + return loopOp; } -static void -genACCParallelOp(Fortran::lower::AbstractConverter &converter, +static void genACC(Fortran::lower::AbstractConverter &converter, + Fortran::lower::pft::Evaluation &eval, + const Fortran::parser::OpenACCLoopConstruct &loopConstruct) { + + const auto &beginLoopDirective = + std::get(loopConstruct.t); + const auto &loopDirective = + std::get(beginLoopDirective.t); + + if (loopDirective.v == llvm::acc::ACCD_loop) { + const auto &accClauseList = + std::get(beginLoopDirective.t); + createLoopOp(converter, accClauseList); + } +} + +static mlir::acc::ParallelOp +createParallelOp(Fortran::lower::AbstractConverter &converter, const Fortran::parser::AccClauseList &accClauseList) { + + // Parallel operation operands mlir::Value async; mlir::Value numGangs; mlir::Value numWorkers; mlir::Value vectorLength; mlir::Value ifCond; mlir::Value selfCond; + mlir::Value waitDevnum; llvm::SmallVector waitOperands, reductionOperands, copyOperands, copyinOperands, copyinReadonlyOperands, copyoutOperands, copyoutZeroOperands, createOperands, createZeroOperands, noCreateOperands, - presentOperands, devicePtrOperands, attachOperands, privateOperands, - firstprivateOperands; + presentOperands, devicePtrOperands, attachOperands, firstprivateOperands, + privateOperands; // Async, wait and self clause have optional values but can be present with // no value as well. When there is no value, the op has an attribute to @@ -357,38 +372,21 @@ genACCParallelOp(Fortran::lower::AbstractConverter &converter, bool addWaitAttr = false; bool addSelfAttr = false; - auto &firOpBuilder = converter.getFirOpBuilder(); - auto currentLocation = converter.getCurrentLocation(); + fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + mlir::Location currentLocation = converter.getCurrentLocation(); Fortran::lower::StatementContext stmtCtx; // Lower clauses values mapped to operands. // Keep track of each group of operands separatly as clauses can appear // more than once. 
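The addOperand/addOperands calls above feed MLIR's operand-segment mechanism: every clause's values are flattened into one operand list while a parallel vector of segment sizes records how many values each group contributed, so the op can split them back out. A minimal stand-in sketch of that bookkeeping (plain C++; the int type models mlir::Value, with 0 meaning "no value", and the helper names are taken from the call sites):

#include <cstdint>
#include <vector>

// An optional operand contributes a segment of size 0 or 1.
void addOperand(std::vector<int> &operands,
                std::vector<std::int32_t> &operandSegments, int value) {
  operandSegments.push_back(value ? 1 : 0);
  if (value)
    operands.push_back(value);
}

// A variadic operand group contributes a segment covering all its values.
void addOperands(std::vector<int> &operands,
                 std::vector<std::int32_t> &operandSegments,
                 const std::vector<int> &values) {
  operandSegments.push_back(static_cast<std::int32_t>(values.size()));
  operands.insert(operands.end(), values.begin(), values.end());
}

With this scheme the clause loop that follows only has to append to the right group, which is why each group of operands is tracked separately even when a clause appears more than once.
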
- for (const auto &clause : accClauseList.v) { + for (const Fortran::parser::AccClause &clause : accClauseList.v) { if (const auto *asyncClause = std::get_if(&clause.u)) { - const auto &asyncClauseValue = asyncClause->v; - if (asyncClauseValue) { // async has a value. - async = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(*asyncClauseValue), stmtCtx)); - } else { - addAsyncAttr = true; - } + genAsyncClause(converter, asyncClause, async, addAsyncAttr, stmtCtx); } else if (const auto *waitClause = std::get_if(&clause.u)) { - const auto &waitClauseValue = waitClause->v; - if (waitClauseValue) { // wait has a value. - const Fortran::parser::AccWaitArgument &waitArg = *waitClauseValue; - const auto &waitList = - std::get>(waitArg.t); - for (const Fortran::parser::ScalarIntExpr &value : waitList) { - auto v = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(value), stmtCtx)); - waitOperands.push_back(v); - } - } else { - addWaitAttr = true; - } + genWaitClause(converter, waitClause, waitOperands, waitDevnum, + addWaitAttr, stmtCtx); } else if (const auto *numGangsClause = std::get_if( &clause.u)) { @@ -406,10 +404,7 @@ genACCParallelOp(Fortran::lower::AbstractConverter &converter, *Fortran::semantics::GetExpr(vectorLengthClause->v), stmtCtx)); } else if (const auto *ifClause = std::get_if(&clause.u)) { - mlir::Value cond = fir::getBase(converter.genExprValue( - *Fortran::semantics::GetExpr(ifClause->v), stmtCtx)); - ifCond = firOpBuilder.createConvert(currentLocation, - firOpBuilder.getI1Type(), cond); + genIfClause(converter, ifClause, ifCond, stmtCtx); } else if (const auto *selfClause = std::get_if(&clause.u)) { const Fortran::parser::AccSelfClause &accSelfClause = selfClause->v; @@ -424,6 +419,21 @@ genACCParallelOp(Fortran::lower::AbstractConverter &converter, } else { addSelfAttr = true; } + } else if (const auto *accClauseList = + std::get_if( + &accSelfClause.u)) { + // TODO This would be nicer to be done in canonicalization step. 
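The async and wait handling in the loop above now goes through shared helpers. Judging from the removed inline code and the call site, genAsyncClause is approximately the following; the exact signature is an assumption reconstructed for illustration, not the committed interface:

// Approximate reconstruction from the removed inline handling; parameter
// types are inferred from the call site and should be treated as assumptions.
static void genAsyncClause(Fortran::lower::AbstractConverter &converter,
                           const Fortran::parser::AccClause::Async *asyncClause,
                           mlir::Value &async, bool &addAsyncAttr,
                           Fortran::lower::StatementContext &stmtCtx) {
  const auto &asyncClauseValue = asyncClause->v;
  if (asyncClauseValue) // async has a value: lower it to an operand
    async = fir::getBase(converter.genExprValue(
        *Fortran::semantics::GetExpr(*asyncClauseValue), stmtCtx));
  else // bare async: record it as an attribute instead
    addAsyncAttr = true;
}
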
+ if (accClauseList->v.size() == 1) { + const auto &accObject = accClauseList->v.front(); + if (const auto *designator = + std::get_if(&accObject.u)) { + if (const auto *name = getDesignatorNameIfDataRef(*designator)) { + auto cond = converter.getSymbolAddress(*name->symbol); + selfCond = firOpBuilder.createConvert( + currentLocation, firOpBuilder.getI1Type(), cond); + } + } + } } } else if (const auto *copyClause = std::get_if(&clause.u)) { @@ -498,8 +508,9 @@ genACCParallelOp(Fortran::lower::AbstractConverter &converter, addOperands(operands, operandSegments, privateOperands); addOperands(operands, operandSegments, firstprivateOperands); - auto parallelOp = createRegionOp( - firOpBuilder, currentLocation, operands, operandSegments); + mlir::acc::ParallelOp parallelOp = + createRegionOp( + firOpBuilder, currentLocation, operands, operandSegments); if (addAsyncAttr) parallelOp->setAttr(mlir::acc::ParallelOp::getAsyncAttrName(), @@ -510,6 +521,14 @@ genACCParallelOp(Fortran::lower::AbstractConverter &converter, if (addSelfAttr) parallelOp->setAttr(mlir::acc::ParallelOp::getSelfAttrName(), firOpBuilder.getUnitAttr()); + + return parallelOp; +} + +static void +genACCParallelOp(Fortran::lower::AbstractConverter &converter, + const Fortran::parser::AccClauseList &accClauseList) { + createParallelOp(converter, accClauseList); } static void genACCDataOp(Fortran::lower::AbstractConverter &converter, @@ -609,6 +628,37 @@ genACC(Fortran::lower::AbstractConverter &converter, } } +static void +genACCParallelLoopOps(Fortran::lower::AbstractConverter &converter, + const Fortran::parser::AccClauseList &accClauseList) { + createParallelOp(converter, accClauseList); + createLoopOp(converter, accClauseList); +} + +static void +genACC(Fortran::lower::AbstractConverter &converter, + Fortran::lower::pft::Evaluation &eval, + const Fortran::parser::OpenACCCombinedConstruct &combinedConstruct) { + const auto &beginCombinedDirective = + std::get(combinedConstruct.t); + const auto &combinedDirective = + std::get(beginCombinedDirective.t); + const auto &accClauseList = + std::get(beginCombinedDirective.t); + + if (combinedDirective.v == llvm::acc::ACCD_kernels_loop) { + TODO(converter.getCurrentLocation(), + "OpenACC Kernels Loop construct not lowered yet!"); + } else if (combinedDirective.v == llvm::acc::ACCD_parallel_loop) { + genACCParallelLoopOps(converter, accClauseList); + } else if (combinedDirective.v == llvm::acc::ACCD_serial_loop) { + TODO(converter.getCurrentLocation(), + "OpenACC Serial Loop construct not lowered yet!"); + } else { + llvm::report_fatal_error("Unknown combined construct encountered"); + } +} + static void genACCEnterDataOp(Fortran::lower::AbstractConverter &converter, const Fortran::parser::AccClauseList &accClauseList) { @@ -969,8 +1019,7 @@ void Fortran::lower::genOpenACCConstruct( }, [&](const Fortran::parser::OpenACCCombinedConstruct &combinedConstruct) { - TODO(converter.getCurrentLocation(), - "OpenACC Combined construct not lowered yet!"); + genACC(converter, eval, combinedConstruct); }, [&](const Fortran::parser::OpenACCLoopConstruct &loopConstruct) { genACC(converter, eval, loopConstruct); diff --git a/flang/lib/Lower/OpenMP.cpp b/flang/lib/Lower/OpenMP.cpp index a895e6fea37e8ee24579d78e224ad796a7ad653d..c60fb711f55d07ba8de8bbee4e5a0a9e961198a6 100644 --- a/flang/lib/Lower/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP.cpp @@ -69,12 +69,11 @@ static void createPrivateVarSyms(Fortran::lower::AbstractConverter &converter, // variables) happen separately, for everything else privatize 
here. if (sym->test(Fortran::semantics::Symbol::Flag::OmpPreDetermined)) continue; + bool success = converter.createHostAssociateVarClone(*sym); + (void)success; + assert(success && "Privatization failed due to existing binding"); if constexpr (std::is_same_v) { converter.copyHostAssociateVar(*sym); - } else { - bool success = converter.createHostAssociateVarClone(*sym); - (void)success; - assert(success && "Privatization failed due to existing binding"); } } } @@ -161,9 +160,10 @@ static void threadPrivatizeVars(Fortran::lower::AbstractConverter &converter, }; llvm::SetVector threadprivateSyms; - converter.collectSymbolSet( - eval, threadprivateSyms, - Fortran::semantics::Symbol::Flag::OmpThreadprivate); + converter.collectSymbolSet(eval, threadprivateSyms, + Fortran::semantics::Symbol::Flag::OmpThreadprivate, + /*isUltimateSymbol=*/false); + std::set threadprivateSymNames; // For a COMMON block, the ThreadprivateOp is generated for itself instead of // its members, so only bind the value of the new copied ThreadprivateOp @@ -173,6 +173,11 @@ static void threadPrivatizeVars(Fortran::lower::AbstractConverter &converter, for (std::size_t i = 0; i < threadprivateSyms.size(); i++) { auto sym = threadprivateSyms[i]; mlir::Value symThreadprivateValue; + // The variable may be used more than once, and each reference has one + // symbol with the same name. Only do once for references of one variable. + if (threadprivateSymNames.find(sym->name()) != threadprivateSymNames.end()) + continue; + threadprivateSymNames.insert(sym->name()); if (const Fortran::semantics::Symbol *common = Fortran::semantics::FindCommonBlockContaining(sym->GetUltimate())) { mlir::Value commonThreadprivateValue; @@ -198,6 +203,36 @@ static void threadPrivatizeVars(Fortran::lower::AbstractConverter &converter, firOpBuilder.restoreInsertionPoint(insPt); } +static void +genCopyinClause(Fortran::lower::AbstractConverter &converter, + const Fortran::parser::OmpClauseList &opClauseList) { + fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + mlir::OpBuilder::InsertPoint insPt = firOpBuilder.saveInsertionPoint(); + firOpBuilder.setInsertionPointToStart(firOpBuilder.getAllocaBlock()); + bool hasCopyin = false; + for (const Fortran::parser::OmpClause &clause : opClauseList.v) { + if (const auto ©inClause = + std::get_if(&clause.u)) { + hasCopyin = true; + const Fortran::parser::OmpObjectList &ompObjectList = copyinClause->v; + for (const Fortran::parser::OmpObject &ompObject : ompObjectList.v) { + Fortran::semantics::Symbol *sym = getOmpObjectSymbol(ompObject); + if (sym->has()) + TODO(converter.getCurrentLocation(), "common block in Copyin clause"); + if (Fortran::semantics::IsAllocatableOrPointer(sym->GetUltimate())) + TODO(converter.getCurrentLocation(), + "pointer or allocatable variables in Copyin clause"); + assert(sym->has() && + "No host-association found"); + converter.copyHostAssociateVar(*sym); + } + } + } + if (hasCopyin) + firOpBuilder.create(converter.getCurrentLocation()); + firOpBuilder.restoreInsertionPoint(insPt); +} + static void genObjectList(const Fortran::parser::OmpObjectList &objectList, Fortran::lower::AbstractConverter &converter, llvm::SmallVectorImpl &operands) { @@ -343,8 +378,11 @@ createBodyOfOp(Op &op, Fortran::lower::AbstractConverter &converter, if (clauses && !outerCombined) privatizeVars(converter, *clauses); - if (std::is_same_v) + if (std::is_same_v) { threadPrivatizeVars(converter, eval); + if (clauses) + genCopyinClause(converter, *clauses); + } } static void 
genOMP(Fortran::lower::AbstractConverter &converter, @@ -490,7 +528,6 @@ createCombinedParallelOp(Fortran::lower::AbstractConverter &converter, std::get(directive.t); // TODO: Handle the following clauses // 1. default - // 2. copyin // Note: rest of the clauses are handled when the inner operation is created for (const Fortran::parser::OmpClause &clause : opClauseList.v) { if (const auto &ifClause = @@ -570,8 +607,9 @@ genOMP(Fortran::lower::AbstractConverter &converter, allocateOperands); } else if (std::get_if(&clause.u) || std::get_if( - &clause.u)) { - // Privatisation clauses are handled elsewhere. + &clause.u) || + std::get_if(&clause.u)) { + // Privatisation and copyin clauses are handled elsewhere. continue; } else if (std::get_if(&clause.u)) { // Nothing needs to be done for threads clause. diff --git a/flang/lib/Lower/SymbolMap.cpp b/flang/lib/Lower/SymbolMap.cpp index 414c4f0f5c9e9f3df49491d4fac7da7d3ef21d29..c081c8aa1e7af6b54f07778f20ea8e44605e47fd 100644 --- a/flang/lib/Lower/SymbolMap.cpp +++ b/flang/lib/Lower/SymbolMap.cpp @@ -51,6 +51,25 @@ Fortran::lower::SymbolBox Fortran::lower::SymMap::shallowLookupSymbol( return SymbolBox::None{}; } +/// Skip one level when looking up the symbol. The use case is such as looking +/// up the host variable symbol box by skipping the associated level in +/// host-association in OpenMP code. +Fortran::lower::SymbolBox Fortran::lower::SymMap::lookupOneLevelUpSymbol( + Fortran::semantics::SymbolRef symRef) { + Fortran::semantics::SymbolRef sym = symRef.get().GetUltimate(); + auto jmap = symbolMapStack.rbegin(); + auto jend = symbolMapStack.rend(); + if (jmap == jend) + return SymbolBox::None{}; + // Skip one level in symbol map stack. + for (++jmap; jmap != jend; ++jmap) { + auto iter = jmap->find(&*sym); + if (iter != jmap->end()) + return iter->second; + } + return SymbolBox::None{}; +} + mlir::Value Fortran::lower::SymMap::lookupImpliedDo(Fortran::lower::SymMap::AcDoVar var) { for (auto [marker, binding] : llvm::reverse(impliedDoStack)) diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index ad59bd734fd052cc41bd369edb1235bcc1c228d3..a451a5da1dcfab6c534b27cd5dd0775fb84019b7 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -2818,7 +2818,7 @@ struct SelectCaseOpConversion : public FIROpConversion { caseOp.getSuccessorOperands(adaptor.getOperands(), t); llvm::Optional cmpOps = *caseOp.getCompareOperands(adaptor.getOperands(), t); - mlir::Value caseArg = *(cmpOps->begin()); + mlir::Value caseArg = *(cmpOps.value().begin()); mlir::Attribute attr = cases[t]; if (attr.isa()) { auto cmp = rewriter.create( @@ -2847,7 +2847,7 @@ struct SelectCaseOpConversion : public FIROpConversion { rewriter.setInsertionPointToEnd(thisBlock); rewriter.create(loc, cmp, newBlock1, newBlock2); rewriter.setInsertionPointToEnd(newBlock1); - mlir::Value caseArg0 = *(cmpOps->begin() + 1); + mlir::Value caseArg0 = *(cmpOps.value().begin() + 1); auto cmp0 = rewriter.create( loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index 99e59f7fc7af858a5bb6a9c8ce70304adcdd2278..bb42638a308ee56cb21e8663402d2ab22a053950 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -476,7 +476,8 @@ private: static constexpr Symbol::Flags ompFlagsRequireNewSymbol{ 
Symbol::Flag::OmpPrivate, Symbol::Flag::OmpLinear, Symbol::Flag::OmpFirstPrivate, Symbol::Flag::OmpLastPrivate, - Symbol::Flag::OmpReduction, Symbol::Flag::OmpCriticalLock}; + Symbol::Flag::OmpReduction, Symbol::Flag::OmpCriticalLock, + Symbol::Flag::OmpCopyIn}; static constexpr Symbol::Flags ompFlagsRequireMark{ Symbol::Flag::OmpThreadprivate}; @@ -580,6 +581,10 @@ Symbol *DirectiveAttributeVisitor::DeclarePrivateAccessEntity( if (object.owner() != currScope()) { auto &symbol{MakeAssocSymbol(object.name(), object, scope)}; symbol.set(flag); + if (flag == Symbol::Flag::OmpCopyIn) { + // The symbol in copyin clause must be threadprivate entity. + symbol.set(Symbol::Flag::OmpThreadprivate); + } return &symbol; } else { object.set(flag); diff --git a/flang/runtime/edit-input.cpp b/flang/runtime/edit-input.cpp index 226bf3570ab3e5ea687f834d198fbdb0395b6738..5be6b0760418118640bf06979a7c928e2c9ae3e3 100644 --- a/flang/runtime/edit-input.cpp +++ b/flang/runtime/edit-input.cpp @@ -730,7 +730,17 @@ bool EditCharacterInput( chunk = 1; } --remaining; - } else { + } else if constexpr (sizeof *x > 1) { + // Read single byte with expansion into multi-byte CHARACTER + chunk = 1; + if (skipping) { + --skip; + } else { + *x++ = static_cast(*input); + --length; + } + --remaining; + } else { // single bytes -> default CHARACTER if (skipping) { chunk = std::min(skip, ready); skip -= chunk; diff --git a/flang/runtime/edit-output.cpp b/flang/runtime/edit-output.cpp index 9b25ef2fba16fb0e2e92c127a300901e7a3ac1a3..7dc60cbd3adbd9f98cf98a29baa1da1cf81a3249 100644 --- a/flang/runtime/edit-output.cpp +++ b/flang/runtime/edit-output.cpp @@ -199,7 +199,7 @@ const char *RealOutputEditingBase::FormatExponent( } *--exponent = expo < 0 ? '-' : '+'; if (edit.expoDigits || edit.IsListDirected() || exponent + 3 == eEnd) { - *--exponent = edit.descriptor == 'D' ? 'D' : 'E'; // not 'G' + *--exponent = edit.descriptor == 'D' ? 'D' : 'E'; // not 'G' or 'Q' } length = eEnd - exponent; return overflow ? nullptr : exponent; @@ -264,9 +264,7 @@ bool RealOutputEditing::EditEorDOutput(const DataEdit &edit) { if (editWidth == 0) { // "the processor selects the field width" if (edit.digits.has_value()) { // E0.d if (editDigits == 0) { // E0.0 - editWidth = 7; // -.0E+ee - } else { - editWidth = editDigits + 6; // -.666E+ee + significantDigits = 1; } } else { // E0 flags |= decimal::Minimize; @@ -485,7 +483,7 @@ DataEdit RealOutputEditing::EditForGOutput(DataEdit edit) { int significantDigits{ edit.digits.value_or(BinaryFloatingPoint::decimalPrecision)}; // 'd' if (editWidth > 0 && significantDigits == 0) { - return edit; // Gw.0 -> Ew.0 for w > 0 + return edit; // Gw.0Ee -> Ew.0Ee for w > 0 } int flags{0}; if (edit.modes.editingFlags & signPlus) { @@ -498,7 +496,10 @@ DataEdit RealOutputEditing::EditForGOutput(DataEdit edit) { } int expo{IsZero() ? 
1 : converted.decimalExponent}; // 's' if (expo < 0 || expo > significantDigits) { - return edit; // Ew.d + if (editWidth == 0 && !edit.expoDigits) { // G0.d -> G0.dE0 + edit.expoDigits = 0; + } + return edit; // Ew.dEe } edit.descriptor = 'F'; edit.modes.scale = 0; // kP is ignored for G when no exponent field diff --git a/flang/runtime/io-api.cpp b/flang/runtime/io-api.cpp index d730dd844c2ac9957e18cbf1d6cb1315df5524d5..60e52e50c32bc9ab5262ff621d2c4fe429b5c8e9 100644 --- a/flang/runtime/io-api.cpp +++ b/flang/runtime/io-api.cpp @@ -147,6 +147,18 @@ Cookie IONAME(BeginInternalFormattedInput)(const char *internal, format, formatLength, scratchArea, scratchBytes, sourceFile, sourceLine); } +static Cookie NoopUnit(const Terminator &terminator, int unitNumber, + enum Iostat iostat = IostatOk) { + Cookie cookie{&New{terminator}( + terminator.sourceFileName(), terminator.sourceLine(), unitNumber) + .release() + ->ioStatementState()}; + if (iostat != IostatOk) { + cookie->GetIoErrorHandler().SetPendingError(iostat); + } + return cookie; +} + static ExternalFileUnit *GetOrCreateUnit(int unitNumber, Direction direction, std::optional isUnformatted, const Terminator &terminator, Cookie &errorCookie) { @@ -156,11 +168,7 @@ static ExternalFileUnit *GetOrCreateUnit(int unitNumber, Direction direction, errorCookie = nullptr; return unit; } else { - errorCookie = &New{terminator}( - terminator.sourceFileName(), terminator.sourceLine(), unitNumber) - .release() - ->ioStatementState(); - errorCookie->GetIoErrorHandler().SetPendingError(IostatBadUnitNumber); + errorCookie = NoopUnit(terminator, unitNumber, IostatBadUnitNumber); return nullptr; } } @@ -358,12 +366,7 @@ Cookie IONAME(BeginOpenUnit)( // OPEN(without NEWUNIT=) return &unit->BeginIoStatement( *unit, wasExtant, sourceFile, sourceLine); } else { - auto &io{ - New{terminator}(sourceFile, sourceLine, unitNumber) - .release() - ->ioStatementState()}; - io.GetIoErrorHandler().SetPendingError(IostatBadUnitNumber); - return &io; + return NoopUnit(terminator, unitNumber, IostatBadUnitNumber); } } @@ -378,7 +381,6 @@ Cookie IONAME(BeginOpenNewUnit)( // OPEN(NEWUNIT=j) Cookie IONAME(BeginWait)(ExternalUnit unitNumber, AsynchronousId id, const char *sourceFile, int sourceLine) { - Terminator terminator{sourceFile, sourceLine}; if (ExternalFileUnit * unit{ExternalFileUnit::LookUp(unitNumber)}) { if (unit->Wait(id)) { return &unit->BeginIoStatement( @@ -388,14 +390,9 @@ Cookie IONAME(BeginWait)(ExternalUnit unitNumber, AsynchronousId id, IostatBadWaitId, unit, sourceFile, sourceLine); } } else { - auto &io{ - New{terminator}(sourceFile, sourceLine, unitNumber) - .release() - ->ioStatementState()}; - if (id != 0) { - io.GetIoErrorHandler().SetPendingError(IostatBadWaitUnit); - } - return &io; + Terminator terminator{sourceFile, sourceLine}; + return NoopUnit( + terminator, unitNumber, id == 0 ? 
IostatOk : IostatBadWaitUnit); } } Cookie IONAME(BeginWaitAll)( @@ -410,10 +407,8 @@ Cookie IONAME(BeginClose)( *unit, sourceFile, sourceLine); } else { // CLOSE(UNIT=bad unit) is just a no-op - Terminator oom{sourceFile, sourceLine}; - return &New{oom}(sourceFile, sourceLine, unitNumber) - .release() - ->ioStatementState(); + Terminator terminator{sourceFile, sourceLine}; + return NoopUnit(terminator, unitNumber); } } @@ -423,11 +418,10 @@ Cookie IONAME(BeginFlush)( return &unit->BeginIoStatement( *unit, ExternalMiscIoStatementState::Flush, sourceFile, sourceLine); } else { - // FLUSH(UNIT=unknown) is a no-op - Terminator oom{sourceFile, sourceLine}; - return &New{oom}(sourceFile, sourceLine, unitNumber) - .release() - ->ioStatementState(); + // FLUSH(UNIT=bad unit) is an error; an unconnected unit is a no-op + Terminator terminator{sourceFile, sourceLine}; + return NoopUnit(terminator, unitNumber, + unitNumber >= 0 ? IostatOk : IostatBadFlushUnit); } } @@ -438,12 +432,7 @@ Cookie IONAME(BeginBackspace)( return &unit->BeginIoStatement( *unit, ExternalMiscIoStatementState::Backspace, sourceFile, sourceLine); } else { - auto &io{ - New{terminator}(sourceFile, sourceLine, unitNumber) - .release() - ->ioStatementState()}; - io.GetIoErrorHandler().SetPendingError(IostatBadBackspaceUnit); - return &io; + return NoopUnit(terminator, unitNumber, IostatBadBackspaceUnit); } } @@ -634,44 +623,22 @@ bool IONAME(SetPad)(Cookie cookie, const char *keyword, std::size_t length) { bool IONAME(SetPos)(Cookie cookie, std::int64_t pos) { IoStatementState &io{*cookie}; - ConnectionState &connection{io.GetConnectionState()}; IoErrorHandler &handler{io.GetIoErrorHandler()}; - if (connection.access != Access::Stream) { - handler.SignalError("POS= may not appear unless ACCESS='STREAM'"); - return false; - } - if (pos < 1) { // POS=1 is beginning of file (12.6.2.11) - handler.SignalError("POS=%zd is invalid", static_cast(pos)); - return false; - } if (auto *unit{io.GetExternalFileUnit()}) { - unit->SetPosition(pos - 1, handler); - return true; + return unit->SetStreamPos(pos, handler); } else if (!io.get_if()) { - io.GetIoErrorHandler().Crash("SetPos() called on internal unit"); + handler.Crash("SetPos() called on internal unit"); } return false; } bool IONAME(SetRec)(Cookie cookie, std::int64_t rec) { IoStatementState &io{*cookie}; - ConnectionState &connection{io.GetConnectionState()}; IoErrorHandler &handler{io.GetIoErrorHandler()}; - if (connection.access != Access::Direct) { - handler.SignalError("REC= may not appear unless ACCESS='DIRECT'"); - return false; - } - if (!connection.openRecl) { - handler.SignalError("RECL= was not specified"); - return false; - } - if (rec < 1) { - handler.SignalError("REC=%zd is invalid", static_cast(rec)); - return false; - } - connection.currentRecordNumber = rec; if (auto *unit{io.GetExternalFileUnit()}) { - unit->SetPosition((rec - 1) * *connection.openRecl, handler); + unit->SetDirectRec(rec, handler); + } else if (!io.get_if()) { + handler.Crash("SetRec() called on internal unit"); } return true; } diff --git a/flang/runtime/io-stmt.cpp b/flang/runtime/io-stmt.cpp index 3bc3eba7a86b447b99026838aa15193a93fc0ef0..ebddf2a53b11eb26cceaa645a5be7cb1b5b59c9f 100644 --- a/flang/runtime/io-stmt.cpp +++ b/flang/runtime/io-stmt.cpp @@ -677,7 +677,8 @@ bool IoStatementState::CheckForEndOfRecord() { if (auto length{connection.EffectiveRecordLength()}) { if (connection.positionInRecord >= *length) { IoErrorHandler &handler{GetIoErrorHandler()}; - if (mutableModes().nonAdvancing) { + 
const auto &modes{mutableModes()}; + if (modes.nonAdvancing) { if (connection.access == Access::Stream && connection.unterminatedRecord) { // Reading final unterminated record left by a @@ -687,10 +688,10 @@ bool IoStatementState::CheckForEndOfRecord() { } else { handler.SignalEor(); } - } else if (!connection.modes.pad) { + } else if (!modes.pad) { handler.SignalError(IostatRecordReadOverrun); } - return connection.modes.pad; // PAD='YES' + return modes.pad; // PAD='YES' } } } diff --git a/flang/runtime/iostat.cpp b/flang/runtime/iostat.cpp index d39f9b64f5e6502af9fd893175750049728b22c0..747a776aae7cc80d76f6fa2e19fe276c5fdaec0c 100644 --- a/flang/runtime/iostat.cpp +++ b/flang/runtime/iostat.cpp @@ -87,7 +87,7 @@ const char *IostatErrorString(int iostat) { return "READ/WRITE(ASYNCHRONOUS='YES') on unit without " "OPEN(ASYNCHRONOUS='YES')"; case IostatBadWaitUnit: - return "WAIT(UNIT=) for a bad unit number"; + return "WAIT(UNIT=) for a bad or unconnected unit number"; case IostatBOZInputOverflow: return "B/O/Z input value overflows variable"; case IostatIntegerInputOverflow: @@ -107,6 +107,8 @@ const char *IostatErrorString(int iostat) { return "BACKSPACE on unconnected unit"; case IostatBadUnitNumber: return "Negative unit number is not allowed"; + case IostatBadFlushUnit: + return "FLUSH attempted on a bad or unconnected unit number"; default: return nullptr; } diff --git a/flang/runtime/unit.cpp b/flang/runtime/unit.cpp index 729bce8ddfa164b6471846750bcf8c751ab05733..e0daf8c24a2384dae523e00f75ded2c75f498d4f 100644 --- a/flang/runtime/unit.cpp +++ b/flang/runtime/unit.cpp @@ -655,6 +655,45 @@ void ExternalFileUnit::SetPosition(std::int64_t pos, IoErrorHandler &handler) { BeginRecord(); } +bool ExternalFileUnit::SetStreamPos( + std::int64_t oneBasedPos, IoErrorHandler &handler) { + if (access != Access::Stream) { + handler.SignalError("POS= may not appear unless ACCESS='STREAM'"); + return false; + } + if (oneBasedPos < 1) { // POS=1 is beginning of file (12.6.2.11) + handler.SignalError( + "POS=%zd is invalid", static_cast(oneBasedPos)); + return false; + } + SetPosition(oneBasedPos - 1, handler); + // We no longer know which record we're in. Set currentRecordNumber to + // a large value from whence we can both advance and backspace. 
+ currentRecordNumber = std::numeric_limits::max() / 2; + endfileRecordNumber.reset(); + return true; +} + +bool ExternalFileUnit::SetDirectRec( + std::int64_t oneBasedRec, IoErrorHandler &handler) { + if (access != Access::Direct) { + handler.SignalError("REC= may not appear unless ACCESS='DIRECT'"); + return false; + } + if (!openRecl) { + handler.SignalError("RECL= was not specified"); + return false; + } + if (oneBasedRec < 1) { + handler.SignalError( + "REC=%zd is invalid", static_cast(oneBasedRec)); + return false; + } + currentRecordNumber = oneBasedRec; + SetPosition((oneBasedRec - 1) * *openRecl, handler); + return true; +} + void ExternalFileUnit::EndIoStatement() { io_.reset(); u_.emplace(); diff --git a/flang/runtime/unit.h b/flang/runtime/unit.h index 03a4a44fa95af51f20b7acb613280c99ea4d6220..76666c65ab68c8c3ae0ac2e2de311381fe31fb03 100644 --- a/flang/runtime/unit.h +++ b/flang/runtime/unit.h @@ -94,7 +94,8 @@ public: void Endfile(IoErrorHandler &); void Rewind(IoErrorHandler &); void EndIoStatement(); - void SetPosition(std::int64_t, IoErrorHandler &); // zero-based + bool SetStreamPos(std::int64_t, IoErrorHandler &); // one-based, for POS= + bool SetDirectRec(std::int64_t, IoErrorHandler &); // one-based, for REC= std::int64_t InquirePos() const { // 12.6.2.11 defines POS=1 as the beginning of file return frameOffsetInFile_ + recordOffsetInFrame_ + positionInRecord + 1; @@ -110,6 +111,7 @@ public: private: static UnitMap &GetUnitMap(); const char *FrameNextInput(IoErrorHandler &, std::size_t); + void SetPosition(std::int64_t, IoErrorHandler &); // zero-based void BeginSequentialVariableUnformattedInputRecord(IoErrorHandler &); void BeginVariableFormattedInputRecord(IoErrorHandler &); void BackspaceFixedRecord(IoErrorHandler &); diff --git a/flang/test/Driver/default-optimization-pipelines.f90 b/flang/test/Driver/default-optimization-pipelines.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d46acf4965be6e8b6d56b35cde98560dfce7e9b9 --- /dev/null +++ b/flang/test/Driver/default-optimization-pipelines.f90 @@ -0,0 +1,27 @@ +! Verify that`-O{n}` is indeed taken into account when defining the LLVM optimization/middle-end pass pipeline. + +!----------- +! RUN LINES +!----------- +! RUN: %flang -S -O0 %s -Xflang -fdebug-pass-manager -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-O0 +! RUN: %flang_fc1 -S -O0 %s -fdebug-pass-manager -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-O0 + +! RUN: %flang -S -O2 %s -Xflang -fdebug-pass-manager -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-O2 +! RUN: %flang_fc1 -S -O2 %s -fdebug-pass-manager -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-O2 + +!----------------------- +! EXPECTED OUTPUT +!----------------------- +! CHECK-O0-NOT: Running pass: SimplifyCFGPass on simple_loop_ +! CHECK-O0: Running analysis: TargetLibraryAnalysis on simple_loop_ + +! CHECK-O2: Running pass: SimplifyCFGPass on simple_loop_ + +!------- +! INPUT +!------- +subroutine simple_loop + integer :: i + do i=1,5 + end do +end subroutine diff --git a/flang/test/Driver/driver-help.f90 b/flang/test/Driver/driver-help.f90 index fade5abc3b2a531a5bdc0b87d3c47c07f15910ee..9d55c0570958d425fcd79c2c87fed0f0853133fa 100644 --- a/flang/test/Driver/driver-help.f90 +++ b/flang/test/Driver/driver-help.f90 @@ -96,6 +96,7 @@ ! HELP-FC1-NEXT: -fdebug-measure-parse-tree ! HELP-FC1-NEXT: Measure the parse tree ! HELP-FC1-NEXT: -fdebug-module-writer Enable debug messages while writing module files +! 
HELP-FC1-NEXT: -fdebug-pass-manager Prints debug information for the new pass manager
 ! HELP-FC1-NEXT: -fdebug-pre-fir-tree Dump the pre-FIR tree
 ! HELP-FC1-NEXT: -fdebug-unparse-no-sema Unparse and stop (skips the semantic checks)
 ! HELP-FC1-NEXT: -fdebug-unparse-with-symbols
@@ -120,6 +121,7 @@
 ! HELP-FC1-NEXT: -fno-analyzed-objects-for-unparse
 ! HELP-FC1-NEXT: Do not use the analyzed objects when unparsing
 ! HELP-FC1-NEXT: -fno-automatic Implies the SAVE attribute for non-automatic local objects in subprograms unless RECURSIVE
+! HELP-FC1-NEXT: -fno-debug-pass-manager Disables debug printing for the new pass manager
 ! HELP-FC1-NEXT: -fno-reformat Dump the cooked character stream in -E mode
 ! HELP-FC1-NEXT: -fopenacc Enable OpenACC
 ! HELP-FC1-NEXT: -fopenmp Parse OpenMP pragmas and generate parallel code.
diff --git a/flang/test/Driver/flang_f_opts.f90 b/flang/test/Driver/flang_f_opts.f90
new file mode 100644
index 0000000000000000000000000000000000000000..d06e2a342d7daeb17b9316e29078fcd05777caea
--- /dev/null
+++ b/flang/test/Driver/flang_f_opts.f90
@@ -0,0 +1,14 @@
+! Test for warnings generated when parsing driver options. You can use this file for relatively small tests and to avoid creating
+! new test files.
+
+!-----------
+! RUN LINES
+!-----------
+! RUN: %flang -### -S -O4 %s 2>&1 | FileCheck %s
+
+!-----------------------
+! EXPECTED OUTPUT
+!-----------------------
+! CHECK: warning: -O4 is equivalent to -O3
+! CHECK-LABEL: "-fc1"
+! CHECK: -O3
diff --git a/flang/test/Driver/mlir-pass-pipeline.f90 b/flang/test/Driver/mlir-pass-pipeline.f90
index ba006f7d7ef02d5446fd173123a30a2047890ecc..344520a1511c8746e800be035ff0e54025a7d5c2 100644
--- a/flang/test/Driver/mlir-pass-pipeline.f90
+++ b/flang/test/Driver/mlir-pass-pipeline.f90
@@ -5,11 +5,13 @@ end program
 ! CHECK: Pass statistics report
+! CHECK: CSE
 ! CHECK-LABEL: 'func.func' Pipeline
 ! CHECK: ArrayValueCopy
 ! CHECK: CharacterConversion
 ! CHECK: Canonicalizer
 ! CHECK: SimplifyRegionLite
+! CHECK: CSE
 ! CHECK-LABEL: 'func.func' Pipeline
 ! CHECK: MemoryAllocationOpt
@@ -21,6 +23,7 @@
 ! CHECK: SCFToControlFlow
 ! CHECK: Canonicalizer
 ! CHECK: SimplifyRegionLite
+! CHECK: CSE
 ! CHECK: BoxedProcedurePass
 ! CHECK-LABEL: 'func.func' Pipeline
diff --git a/flang/test/Evaluate/folding28.f90 b/flang/test/Evaluate/folding28.f90
index 004c661692fee34bae2a7fb15bc10904c0525931..642919de7414a8c49dec5f6926886424d88f6c4d 100644
--- a/flang/test/Evaluate/folding28.f90
+++ b/flang/test/Evaluate/folding28.f90
@@ -49,4 +49,25 @@ module m
   logical, parameter :: test_sqrt_zero_4 = sqrt_zero_4 == 0.0
   real(8), parameter :: sqrt_zero_8 = sqrt(0.0)
   logical, parameter :: test_sqrt_zero_8 = sqrt_zero_8 == 0.0
+  !
Some common values to get right + real(8), parameter :: sqrt_1_8 = sqrt(1.d0) + logical, parameter :: test_sqrt_1_8 = sqrt_1_8 == 1.d0 + real(8), parameter :: sqrt_2_8 = sqrt(2.d0) + logical, parameter :: test_sqrt_2_8 = sqrt_2_8 == 1.4142135623730951454746218587388284504413604736328125d0 + real(8), parameter :: sqrt_3_8 = sqrt(3.d0) + logical, parameter :: test_sqrt_3_8 = sqrt_3_8 == 1.732050807568877193176604123436845839023590087890625d0 + real(8), parameter :: sqrt_4_8 = sqrt(4.d0) + logical, parameter :: test_sqrt_4_8 = sqrt_4_8 == 2.d0 + real(8), parameter :: sqrt_5_8 = sqrt(5.d0) + logical, parameter :: test_sqrt_5_8 = sqrt_5_8 == 2.236067977499789805051477742381393909454345703125d0 + real(8), parameter :: sqrt_6_8 = sqrt(6.d0) + logical, parameter :: test_sqrt_6_8 = sqrt_6_8 == 2.44948974278317788133563226438127458095550537109375d0 + real(8), parameter :: sqrt_7_8 = sqrt(7.d0) + logical, parameter :: test_sqrt_7_8 = sqrt_7_8 == 2.64575131106459071617109657381661236286163330078125d0 + real(8), parameter :: sqrt_8_8 = sqrt(8.d0) + logical, parameter :: test_sqrt_8_8 = sqrt_8_8 == 2.828427124746190290949243717477656900882720947265625d0 + real(8), parameter :: sqrt_9_8 = sqrt(9.d0) + logical, parameter :: test_sqrt_9_8 = sqrt_9_8 == 3.d0 + real(8), parameter :: sqrt_10_8 = sqrt(10.d0) + logical, parameter :: test_sqrt_10_8 = sqrt_10_8 == 3.162277660168379522787063251598738133907318115234375d0 end module diff --git a/flang/test/Fir/achar.f90 b/flang/test/Fir/achar.f90 new file mode 100644 index 0000000000000000000000000000000000000000..691f4ae811a9da0ab152713f73df60c023b9d253 --- /dev/null +++ b/flang/test/Fir/achar.f90 @@ -0,0 +1,24 @@ +! RUN: bbc -emit-fir %s -o - | FileCheck %s + +! Tests ACHAR lowering (converting an INTEGER to a CHARACTER (singleton, LEN=1) +! along with conversion of CHARACTER to another KIND. +subroutine achar_test1(a) + integer, parameter :: ckind = 2 + integer, intent(in) :: a + character(kind=ckind, len=1) :: ch + + ch = achar(a) + call achar_test1_foo(ch) +end subroutine achar_test1 + +! CHECK-LABEL: func @_QPachar_test1( +! CHECK-SAME: %[[arg:.*]]: !fir.ref {fir.bindc_name = "a"}) { +! CHECK: %[[VAL_0:.*]] = fir.alloca !fir.char<1> +! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.char<2> {bindc_name = "ch", uniq_name = "_QFachar_test1Ech"} +! CHECK: %[[VAL_2:.*]] = fir.load %[[arg]] : !fir.ref +! CHECK: %[[VAL_5:.*]] = fir.undefined !fir.char<1> +! CHECK: %[[VAL_6:.*]] = fir.insert_value %[[VAL_5]], %{{.*}}, [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> +! CHECK: fir.store %[[VAL_6]] to %[[VAL_0]] : !fir.ref> +! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.char<2,?>(%{{.*}} : index) +! CHECK: fir.char_convert %[[VAL_0]] for %{{.*}} to %[[VAL_7]] : !fir.ref>, index, !fir.ref> +! 
CHECK: fir.call @_QPachar_test1_foo(%{{.*}}) : (!fir.boxchar<2>) -> () diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir index b74ebb2a6f6fa548082aa755058f2bd60a2d2bb2..b47ff59007614f1ad8867518054a41409657a5b8 100644 --- a/flang/test/Fir/basic-program.fir +++ b/flang/test/Fir/basic-program.fir @@ -14,11 +14,13 @@ func.func @_QQmain() { // PASSES: Pass statistics report +// PASSES: CSE // PASSES-LABEL: 'func.func' Pipeline // PASSES: ArrayValueCopy // PASSES: CharacterConversion // PASSES: Canonicalizer // PASSES: SimplifyRegionLite +// PASSES: CSE // PASSES-LABEL: 'func.func' Pipeline // PASSES: MemoryAllocationOpt @@ -30,6 +32,7 @@ func.func @_QQmain() { // PASSES: SCFToControlFlow // PASSES: Canonicalizer // PASSES: SimplifyRegionLite +// PASSES: CSE // PASSES: BoxedProcedurePass // PASSES-LABEL: 'func.func' Pipeline diff --git a/flang/test/Fir/optional.fir b/flang/test/Fir/optional.fir index 644a7ef8d2198a42250d9e534804e4ffb4d6ecb5..3b350d6fa941957c3a122367c0095d8c502796f5 100644 --- a/flang/test/Fir/optional.fir +++ b/flang/test/Fir/optional.fir @@ -51,3 +51,37 @@ func.func @bar3() -> i1 { %1 = fir.call @foo3(%0) : (!fir.boxchar<1>) -> i1 return %1 : i1 } + +// CHECK-LABEL: @foo4( +// CHECK-SAME: ptr %[[arg:.*]]) +func.func @foo4(%arg0: !fir.boxproc<(i32)->(i64)>) -> i1 { + // CHECK: %[[ptr:.*]] = ptrtoint ptr %[[arg]] to i64 + // CHECK: icmp ne i64 %[[ptr]], 0 + %0 = fir.is_present %arg0 : (!fir.boxproc<(i32)->(i64)>) -> i1 + return %0 : i1 +} + +// CHECK-LABEL: @bar4 +func.func @bar4() -> i1 { + %0 = fir.absent !fir.boxproc<(i32)->(i64)> + // CHECK: call i1 @foo4(ptr null) + %1 = fir.call @foo4(%0) : (!fir.boxproc<(i32)->(i64)>) -> i1 + return %1 : i1 +} + +// CHECK-LABEL: @foo5( +// CHECK-SAME: ptr %[[arg:.*]]) +func.func @foo5(%arg0: (i32)->(i64)) -> i1 { + // CHECK: %[[ptr:.*]] = ptrtoint ptr %[[arg]] to i64 + // CHECK: icmp ne i64 %[[ptr]], 0 + %0 = fir.is_present %arg0 : ((i32)->(i64)) -> i1 + return %0 : i1 +} + +// CHECK-LABEL: @bar5 +func.func @bar5() -> i1 { + %0 = fir.absent (i32)->(i64) + // CHECK: call i1 @foo5(ptr null) + %1 = fir.call @foo5(%0) : ((i32)->(i64)) -> i1 + return %1 : i1 +} diff --git a/flang/test/Lower/Intrinsics/lbound.f90 b/flang/test/Lower/Intrinsics/lbound.f90 index 2a84d760c89f6bc1096b987ff2d1df1244e6d96d..3ea08800b0fc2293f00f0f3879d1711aca885b25 100644 --- a/flang/test/Lower/Intrinsics/lbound.f90 +++ b/flang/test/Lower/Intrinsics/lbound.f90 @@ -37,11 +37,11 @@ subroutine lbound_test_2(a, dim, res) res = lbound(a, dim, 8) end subroutine -! CHECK: %[[VAL_0:.*]] = fir.undefined index subroutine lbound_test_3(a, dim, res) real, dimension(2:10, 3:*) :: a integer(8):: dim, res ! CHECK: %[[VAL_1:.*]] = fir.load %arg1 : !fir.ref +! CHECK: %[[VAL_0:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_2:.*]] = fir.shape_shift %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_0]] : (index, index, index, index) -> !fir.shapeshift<2> ! CHECK: %[[VAL_3:.*]] = fir.embox %arg0(%[[VAL_2]]) : (!fir.ref>, !fir.shapeshift<2>) -> !fir.box> ! CHECK: %[[VAL_4:.*]] = fir.address_of( diff --git a/flang/test/Lower/Intrinsics/merge.f90 b/flang/test/Lower/Intrinsics/merge.f90 index a9668e4104268ae59ddfe0aca6f57d145a9d9187..d8a3b597eff5641316123cc12b9c05eff8520a68 100644 --- a/flang/test/Lower/Intrinsics/merge.f90 +++ b/flang/test/Lower/Intrinsics/merge.f90 @@ -41,3 +41,40 @@ result = merge(o1, o2, mask) ! CHECK: %[[mask_cast:.*]] = fir.convert %[[mask]] : (!fir.logical<4>) -> i1 ! CHECK: = arith.select %[[mask_cast]], %[[arg1]], %[[arg2]] : !fir.ref> end + +! 
CHECK-LABEL: func @_QPmerge_logical_var_and_expr( +subroutine merge_logical_var_and_expr(l1, l2) +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref> {fir.bindc_name = "l1"}, +! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref> {fir.bindc_name = "l2"}) { + logical :: l1, l2 + call bar(merge(l1, .true., l2)) +! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> +! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_0]] : !fir.ref> +! CHECK: %[[VAL_4:.*]] = arith.constant true +! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_1]] : !fir.ref> +! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (!fir.logical<4>) -> i1 +! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4> +! CHECK: %[[VAL_8:.*]] = arith.select %[[VAL_6]], %[[VAL_3]], %[[VAL_7]] : !fir.logical<4> +! CHECK: fir.store %[[VAL_8]] to %[[VAL_2]] : !fir.ref> +! CHECK: fir.call @_QPbar(%[[VAL_2]]) : (!fir.ref>) -> () +end subroutine + +! CHECK-LABEL: func @_QPmerge_cst_and_dyn_char( +subroutine merge_cst_and_dyn_char(dyn, l) +! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxchar<1> {fir.bindc_name = "dyn"}, +! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref> {fir.bindc_name = "l"}) { + character(4) :: cst = "abcde" + character(*) :: dyn + logical :: l + print *, merge(cst, dyn, l) +! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QFmerge_cst_and_dyn_charEcst) : !fir.ref> +! CHECK: %[[VAL_3:.*]] = arith.constant 4 : index +! CHECK: %[[VAL_4:.*]]:2 = fir.unboxchar %[[VAL_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_1]] : !fir.ref> +! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]] : (!fir.logical<4>) -> i1 +! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_4]]#0 : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_13:.*]] = arith.select %[[VAL_11]], %[[VAL_2]], %[[VAL_12]] : !fir.ref> +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_3]] : (index) -> i64 +! CHECK: fir.call @_FortranAioOutputAscii(%{{.*}}, %[[VAL_14]], %[[VAL_15]]) : (!fir.ref, !fir.ref, i64) -> i1 +end subroutine diff --git a/flang/test/Lower/OpenACC/acc-loop.f90 b/flang/test/Lower/OpenACC/acc-loop.f90 new file mode 100644 index 0000000000000000000000000000000000000000..ac9be539cd6d37120c58ecf3b2f72a7e7c0565b9 --- /dev/null +++ b/flang/test/Lower/OpenACC/acc-loop.f90 @@ -0,0 +1,268 @@ +! This test checks lowering of OpenACC loop directive. + +! 
RUN: bbc -fopenacc -emit-fir %s -o - | FileCheck %s + +program acc_loop + + integer :: i, j + integer, parameter :: n = 10 + real, dimension(n) :: a, b + real, dimension(n, n) :: c, d + integer :: gangNum = 8 + integer :: gangStatic = 8 + integer :: vectorLength = 128 + integer, parameter :: tileSize = 2 + + + !$acc loop + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop seq + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {seq} + + !$acc loop auto + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {auto} + + !$acc loop independent + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {independent} + + !$acc loop gang + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop gang { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop gang(num: 8) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[GANGNUM1:%.*]] = arith.constant 8 : i32 +!CHECK-NEXT: acc.loop gang(num=[[GANGNUM1]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop gang(num: gangNum) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[GANGNUM2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK-NEXT: acc.loop gang(num=[[GANGNUM2]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop gang(num: gangNum, static: gangStatic) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop gang(num=%{{.*}}: i32, static=%{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop vector + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop vector { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop vector(128) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[CONSTANT128:%.*]] = arith.constant 128 : i32 +!CHECK: acc.loop vector([[CONSTANT128]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop vector(vectorLength) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[VECTORLENGTH:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.loop vector([[VECTORLENGTH]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + +!$acc loop worker + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop worker { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop worker(128) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[WORKER128:%.*]] = arith.constant 128 : i32 +!CHECK: acc.loop worker([[WORKER128]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop private(c) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop private(%{{.*}}: !fir.ref>) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop private(c, d) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop private(%{{.*}}: !fir.ref>, %{{.*}}: !fir.ref>) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop private(c) private(d) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop private(%{{.*}}: !fir.ref>, %{{.*}}: !fir.ref>) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop tile(2) + DO i = 1, n + a(i) = b(i) + END DO +!CHECK: [[TILESIZE:%.*]] = arith.constant 2 : i32 +!CHECK: acc.loop tile([[TILESIZE]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + 
!$acc loop tile(*) + DO i = 1, n + a(i) = b(i) + END DO +!CHECK: [[TILESIZEM1:%.*]] = arith.constant -1 : i32 +!CHECK: acc.loop tile([[TILESIZEM1]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop tile(2, 2) + DO i = 1, n + DO j = 1, n + c(i, j) = d(i, j) + END DO + END DO + +!CHECK: [[TILESIZE1:%.*]] = arith.constant 2 : i32 +!CHECK: [[TILESIZE2:%.*]] = arith.constant 2 : i32 +!CHECK: acc.loop tile([[TILESIZE1]]: i32, [[TILESIZE2]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop tile(tileSize) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.loop tile(%{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop tile(tileSize, tileSize) + DO i = 1, n + DO j = 1, n + c(i, j) = d(i, j) + END DO + END DO + +!CHECK: acc.loop tile(%{{.*}}: i32, %{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc loop collapse(2) + DO i = 1, n + DO j = 1, n + c(i, j) = d(i, j) + END DO + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {collapse = 2 : i64} + + !$acc loop + DO i = 1, n + !$acc loop + DO j = 1, n + c(i, j) = d(i, j) + END DO + END DO + +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + +end program diff --git a/flang/test/Lower/OpenACC/acc-parallel-loop.f90 b/flang/test/Lower/OpenACC/acc-parallel-loop.f90 new file mode 100644 index 0000000000000000000000000000000000000000..1a04b84a689fa4bf0d61fcca9627967d6ba8b155 --- /dev/null +++ b/flang/test/Lower/OpenACC/acc-parallel-loop.f90 @@ -0,0 +1,697 @@ +! This test checks lowering of OpenACC parallel loop combined directive. + +! RUN: bbc -fopenacc -emit-fir %s -o - | FileCheck %s + +subroutine acc_parallel_loop + integer :: i, j + + integer :: async = 1 + integer :: wait1 = 1 + integer :: wait2 = 2 + integer :: numGangs = 1 + integer :: numWorkers = 10 + integer :: vectorLength = 128 + logical :: ifCondition = .TRUE. 
+ integer, parameter :: n = 10 + real, dimension(n) :: a, b, c + real, dimension(n, n) :: d, e + real, pointer :: f, g + + integer :: gangNum = 8 + integer :: gangStatic = 8 + integer :: vectorNum = 128 + integer, parameter :: tileSize = 2 + +!CHECK: [[A:%.*]] = fir.alloca !fir.array<10xf32> {{{.*}}uniq_name = "{{.*}}Ea"} +!CHECK: [[B:%.*]] = fir.alloca !fir.array<10xf32> {{{.*}}uniq_name = "{{.*}}Eb"} +!CHECK: [[C:%.*]] = fir.alloca !fir.array<10xf32> {{{.*}}uniq_name = "{{.*}}Ec"} +!CHECK: [[F:%.*]] = fir.alloca !fir.box> {bindc_name = "f", uniq_name = "{{.*}}Ef"} +!CHECK: [[G:%.*]] = fir.alloca !fir.box> {bindc_name = "g", uniq_name = "{{.*}}Eg"} +!CHECK: [[IFCONDITION:%.*]] = fir.address_of(@{{.*}}ifcondition) : !fir.ref> + + !$acc parallel loop + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop async + DO i = 1, n + a(i) = b(i) + END DO + !$acc end parallel loop + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: } attributes {asyncAttr} + + !$acc parallel loop async(1) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel async([[ASYNC1]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop async(async) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel async([[ASYNC2]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop wait + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: } attributes {waitAttr} + + !$acc parallel loop wait(1) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel wait([[WAIT1]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop wait(1, 2) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32 +!CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32 +!CHECK: acc.parallel wait([[WAIT2]]: i32, [[WAIT3]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop wait(wait1, wait2) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[WAIT4:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: [[WAIT5:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel wait([[WAIT4]]: i32, [[WAIT5]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop num_gangs(1) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[NUMGANGS1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel num_gangs([[NUMGANGS1]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop num_gangs(numGangs) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[NUMGANGS2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel num_gangs([[NUMGANGS2]]: i32) { +!CHECK: acc.loop { +!CHECK: 
fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop num_workers(10) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[NUMWORKERS1:%.*]] = arith.constant 10 : i32 +!CHECK: acc.parallel num_workers([[NUMWORKERS1]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop num_workers(numWorkers) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[NUMWORKERS2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel num_workers([[NUMWORKERS2]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop vector_length(128) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[VECTORLENGTH1:%.*]] = arith.constant 128 : i32 +!CHECK: acc.parallel vector_length([[VECTORLENGTH1]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop vector_length(vectorLength) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[VECTORLENGTH2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel vector_length([[VECTORLENGTH2]]: i32) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop if(.TRUE.) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[IF1:%.*]] = arith.constant true +!CHECK: acc.parallel if([[IF1]]) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop if(ifCondition) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref> +!CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1 +!CHECK: acc.parallel if([[IF2]]) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop self(.TRUE.) 
+ DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[SELF1:%.*]] = arith.constant true +!CHECK: acc.parallel self([[SELF1]]) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop self + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: } attributes {selfAttr} + + !$acc parallel loop self(ifCondition) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: [[SELF2:%.*]] = fir.convert [[IFCONDITION]] : (!fir.ref>) -> i1 +!CHECK: acc.parallel self([[SELF2]]) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop copy(a, b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel copy([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop copy(a) copy(b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel copy([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop copyin(a) copyin(readonly: b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel copyin([[A]]: !fir.ref>) copyin_readonly([[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop copyout(a) copyout(zero: b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel copyout([[A]]: !fir.ref>) copyout_zero([[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop create(b) create(zero: a) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel create([[B]]: !fir.ref>) create_zero([[A]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop no_create(a, b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel no_create([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop present(a, b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel present([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop deviceptr(a) deviceptr(b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel deviceptr([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop attach(f, g) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel attach([[F]]: !fir.ref>>, [[G]]: !fir.ref>>) { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop private(a) firstprivate(b) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel private([[A]]: !fir.ref>) firstprivate([[B]]: !fir.ref>) { +!CHECK: acc.loop private([[A]]: !fir.ref>) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} 
+!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop seq + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {seq} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop auto + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {auto} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop independent + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {independent} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop gang + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop gang { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop gang(num: 8) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[GANGNUM1:%.*]] = arith.constant 8 : i32 +!CHECK-NEXT: acc.loop gang(num=[[GANGNUM1]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop gang(num: gangNum) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[GANGNUM2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK-NEXT: acc.loop gang(num=[[GANGNUM2]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop gang(num: gangNum, static: gangStatic) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop gang(num=%{{.*}}: i32, static=%{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop vector + DO i = 1, n + a(i) = b(i) + END DO +!CHECK: acc.parallel { +!CHECK: acc.loop vector { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop vector(128) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[CONSTANT128:%.*]] = arith.constant 128 : i32 +!CHECK: acc.loop vector([[CONSTANT128]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop vector(vectorLength) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[VECTORLENGTH:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.loop vector([[VECTORLENGTH]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop worker + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop worker { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop worker(128) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[WORKER128:%.*]] = arith.constant 128 : i32 +!CHECK: acc.loop worker([[WORKER128]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop collapse(2) + DO i = 1, n + DO j = 1, n + d(i, j) = e(i, j) + END DO + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: } attributes {collapse = 2 : i64} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop + DO i 
= 1, n + !$acc loop + DO j = 1, n + d(i, j) = e(i, j) + END DO + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.loop { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop tile(2) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[TILESIZE:%.*]] = arith.constant 2 : i32 +!CHECK: acc.loop tile([[TILESIZE]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop tile(*) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: [[TILESIZEM1:%.*]] = arith.constant -1 : i32 +!CHECK: acc.loop tile([[TILESIZEM1]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop tile(2, 2) + DO i = 1, n + DO j = 1, n + d(i, j) = e(i, j) + END DO + END DO + +!CHECK: acc.parallel { +!CHECK: [[TILESIZE1:%.*]] = arith.constant 2 : i32 +!CHECK: [[TILESIZE2:%.*]] = arith.constant 2 : i32 +!CHECK: acc.loop tile([[TILESIZE1]]: i32, [[TILESIZE2]]: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop tile(tileSize) + DO i = 1, n + a(i) = b(i) + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop tile(%{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel loop tile(tileSize, tileSize) + DO i = 1, n + DO j = 1, n + d(i, j) = e(i, j) + END DO + END DO + +!CHECK: acc.parallel { +!CHECK: acc.loop tile(%{{.*}}: i32, %{{.*}}: i32) { +!CHECK: fir.do_loop +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + +end subroutine acc_parallel_loop diff --git a/flang/test/Lower/OpenACC/acc-parallel.f90 b/flang/test/Lower/OpenACC/acc-parallel.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bc596581a7e5abfc199274e54c1f9591535307b8 --- /dev/null +++ b/flang/test/Lower/OpenACC/acc-parallel.f90 @@ -0,0 +1,246 @@ +! This test checks lowering of OpenACC parallel directive. + +! RUN: bbc -fopenacc -emit-fir %s -o - | FileCheck %s + +subroutine acc_parallel + integer :: i, j + + integer :: async = 1 + integer :: wait1 = 1 + integer :: wait2 = 2 + integer :: numGangs = 1 + integer :: numWorkers = 10 + integer :: vectorLength = 128 + logical :: ifCondition = .TRUE. 
+ real, dimension(10, 10) :: a, b, c + real, pointer :: d, e + +!CHECK: [[A:%.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"} +!CHECK: [[B:%.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"} +!CHECK: [[C:%.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"} +!CHECK: [[D:%.*]] = fir.alloca !fir.box> {bindc_name = "d", uniq_name = "{{.*}}Ed"} +!CHECK: [[E:%.*]] = fir.alloca !fir.box> {bindc_name = "e", uniq_name = "{{.*}}Ee"} +!CHECK: [[IFCONDITION:%.*]] = fir.address_of(@{{.*}}ifcondition) : !fir.ref> + + !$acc parallel + !$acc end parallel + +!CHECK: acc.parallel { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel async + !$acc end parallel + +!CHECK: acc.parallel { +!CHECK: acc.yield +!CHECK-NEXT: } attributes {asyncAttr} + + !$acc parallel async(1) + !$acc end parallel + +!CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel async([[ASYNC1]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel async(async) + !$acc end parallel + +!CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel async([[ASYNC2]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel wait + !$acc end parallel + +!CHECK: acc.parallel { +!CHECK: acc.yield +!CHECK-NEXT: } attributes {waitAttr} + + !$acc parallel wait(1) + !$acc end parallel + +!CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel wait([[WAIT1]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel wait(1, 2) + !$acc end parallel + +!CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32 +!CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32 +!CHECK: acc.parallel wait([[WAIT2]]: i32, [[WAIT3]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel wait(wait1, wait2) + !$acc end parallel + +!CHECK: [[WAIT4:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: [[WAIT5:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel wait([[WAIT4]]: i32, [[WAIT5]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel num_gangs(1) + !$acc end parallel + +!CHECK: [[NUMGANGS1:%.*]] = arith.constant 1 : i32 +!CHECK: acc.parallel num_gangs([[NUMGANGS1]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel num_gangs(numGangs) + !$acc end parallel + +!CHECK: [[NUMGANGS2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel num_gangs([[NUMGANGS2]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel num_workers(10) + !$acc end parallel + +!CHECK: [[NUMWORKERS1:%.*]] = arith.constant 10 : i32 +!CHECK: acc.parallel num_workers([[NUMWORKERS1]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel num_workers(numWorkers) + !$acc end parallel + +!CHECK: [[NUMWORKERS2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel num_workers([[NUMWORKERS2]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel vector_length(128) + !$acc end parallel + +!CHECK: [[VECTORLENGTH1:%.*]] = arith.constant 128 : i32 +!CHECK: acc.parallel vector_length([[VECTORLENGTH1]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel vector_length(vectorLength) + !$acc end parallel + +!CHECK: [[VECTORLENGTH2:%.*]] = fir.load %{{.*}} : !fir.ref +!CHECK: acc.parallel vector_length([[VECTORLENGTH2]]: i32) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel if(.TRUE.) 
+ !$acc end parallel + +!CHECK: [[IF1:%.*]] = arith.constant true +!CHECK: acc.parallel if([[IF1]]) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel if(ifCondition) + !$acc end parallel + +!CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref> +!CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1 +!CHECK: acc.parallel if([[IF2]]) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel self(.TRUE.) + !$acc end parallel + +!CHECK: [[SELF1:%.*]] = arith.constant true +!CHECK: acc.parallel self([[SELF1]]) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel self + !$acc end parallel + +!CHECK: acc.parallel { +!CHECK: acc.yield +!CHECK-NEXT: } attributes {selfAttr} + + !$acc parallel self(ifCondition) + !$acc end parallel + +!CHECK: [[SELF2:%.*]] = fir.convert [[IFCONDITION]] : (!fir.ref>) -> i1 +!CHECK: acc.parallel self([[SELF2]]) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel copy(a, b, c) + !$acc end parallel + +!CHECK: acc.parallel copy([[A]]: !fir.ref>, [[B]]: !fir.ref>, [[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel copy(a) copy(b) copy(c) + !$acc end parallel + +!CHECK: acc.parallel copy([[A]]: !fir.ref>, [[B]]: !fir.ref>, [[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel copyin(a) copyin(readonly: b, c) + !$acc end parallel + +!CHECK: acc.parallel copyin([[A]]: !fir.ref>) copyin_readonly([[B]]: !fir.ref>, [[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel copyout(a) copyout(zero: b) copyout(c) + !$acc end parallel + +!CHECK: acc.parallel copyout([[A]]: !fir.ref>, [[C]]: !fir.ref>) copyout_zero([[B]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel create(a, b) create(zero: c) + !$acc end parallel + +!CHECK: acc.parallel create([[A]]: !fir.ref>, [[B]]: !fir.ref>) create_zero([[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel no_create(a, b) create(zero: c) + !$acc end parallel + +!CHECK: acc.parallel create_zero([[C]]: !fir.ref>) no_create([[A]]: !fir.ref>, [[B]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel present(a, b, c) + !$acc end parallel + +!CHECK: acc.parallel present([[A]]: !fir.ref>, [[B]]: !fir.ref>, [[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel deviceptr(a) deviceptr(c) + !$acc end parallel + +!CHECK: acc.parallel deviceptr([[A]]: !fir.ref>, [[C]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel attach(d, e) + !$acc end parallel + +!CHECK: acc.parallel attach([[D]]: !fir.ref>>, [[E]]: !fir.ref>>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + + !$acc parallel private(a) firstprivate(b) private(c) + !$acc end parallel + +!CHECK: acc.parallel private([[A]]: !fir.ref>, [[C]]: !fir.ref>) firstprivate([[B]]: !fir.ref>) { +!CHECK: acc.yield +!CHECK-NEXT: }{{$}} + +end subroutine acc_parallel diff --git a/flang/test/Lower/OpenMP/copyin.f90 b/flang/test/Lower/OpenMP/copyin.f90 new file mode 100644 index 0000000000000000000000000000000000000000..cabfb147d2dc2c59ade08e7faa7c724d5be71748 --- /dev/null +++ b/flang/test/Lower/OpenMP/copyin.f90 @@ -0,0 +1,206 @@ +! This test checks lowering of `COPYIN` clause. +! RUN: bbc -fopenmp -emit-fir %s -o - | FileCheck %s +! RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s + +! CHECK-LABEL: func.func @_QPcopyin_scalar_array() { +! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QFcopyin_scalar_arrayEx1) : !fir.ref +! 
CHECK: %[[VAL_1:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref -> !fir.ref +! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QFcopyin_scalar_arrayEx2) : !fir.ref> +! CHECK: %[[VAL_3:.*]] = arith.constant 10 : index +! CHECK: %[[VAL_4:.*]] = omp.threadprivate %[[VAL_2]] : !fir.ref> -> !fir.ref> +! CHECK: omp.parallel { +! CHECK: %[[VAL_5:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref -> !fir.ref +! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: fir.store %[[VAL_6]] to %[[VAL_5]] : !fir.ref +! CHECK: %[[VAL_7:.*]] = omp.threadprivate %[[VAL_2]] : !fir.ref> -> !fir.ref> +! CHECK: %[[VAL_8:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_9:.*]] = fir.array_load %[[VAL_7]](%[[VAL_8]]) : (!fir.ref>, !fir.shape<1>) -> !fir.array<10xi64> +! CHECK: %[[VAL_10:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_11:.*]] = fir.array_load %[[VAL_4]](%[[VAL_10]]) : (!fir.ref>, !fir.shape<1>) -> !fir.array<10xi64> +! CHECK: %[[VAL_12:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_13:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_14:.*]] = arith.subi %[[VAL_3]], %[[VAL_12]] : index +! CHECK: %[[VAL_15:.*]] = fir.do_loop %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_12]] unordered iter_args(%[[VAL_17:.*]] = %[[VAL_9]]) -> (!fir.array<10xi64>) { +! CHECK: %[[VAL_18:.*]] = fir.array_fetch %[[VAL_11]], %[[VAL_16]] : (!fir.array<10xi64>, index) -> i64 +! CHECK: %[[VAL_19:.*]] = fir.array_update %[[VAL_17]], %[[VAL_18]], %[[VAL_16]] : (!fir.array<10xi64>, i64, index) -> !fir.array<10xi64> +! CHECK: fir.result %[[VAL_19]] : !fir.array<10xi64> +! CHECK: } +! CHECK: fir.array_merge_store %[[VAL_9]], %[[VAL_20:.*]] to %[[VAL_7]] : !fir.array<10xi64>, !fir.array<10xi64>, !fir.ref> +! CHECK: omp.barrier +! CHECK: fir.call @_QPsub1(%[[VAL_5]], %[[VAL_7]]) : (!fir.ref, !fir.ref>) -> () +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine copyin_scalar_array() + integer(kind=4), save :: x1 + integer(kind=8), save :: x2(10) + !$omp threadprivate(x1, x2) + + !$omp parallel copyin(x1) copyin(x2) + call sub1(x1, x2) + !$omp end parallel + +end + +! CHECK-LABEL: func.func @_QPcopyin_char_chararray() { +! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QFcopyin_char_chararrayEx3) : !fir.ref> +! CHECK: %[[VAL_1:.*]] = arith.constant 5 : index +! CHECK: %[[VAL_2:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref> -> !fir.ref> +! CHECK: %[[VAL_3:.*]] = fir.address_of(@_QFcopyin_char_chararrayEx4) : !fir.ref>> +! CHECK: %[[VAL_4:.*]] = arith.constant 5 : index +! CHECK: %[[VAL_5:.*]] = arith.constant 10 : index +! CHECK: %[[VAL_6:.*]] = omp.threadprivate %[[VAL_3]] : !fir.ref>> -> !fir.ref>> +! CHECK: omp.parallel { +! CHECK: %[[VAL_7:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref> -> !fir.ref> +! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_1]] : (index) -> i64 +! CHECK: %[[VAL_10:.*]] = arith.muli %[[VAL_8]], %[[VAL_9]] : i64 +! CHECK: %[[VAL_11:.*]] = arith.constant false +! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_7]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_12]], %[[VAL_13]], %[[VAL_10]], %[[VAL_11]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: %[[VAL_14:.*]] = omp.threadprivate %[[VAL_3]] : !fir.ref>> -> !fir.ref>> +! CHECK: %[[VAL_15:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1> +! 
CHECK: %[[VAL_16:.*]] = fir.array_load %[[VAL_14]](%[[VAL_15]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.array<10x!fir.char<1,5>> +! CHECK: %[[VAL_17:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_18:.*]] = fir.array_load %[[VAL_6]](%[[VAL_17]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.array<10x!fir.char<1,5>> +! CHECK: %[[VAL_19:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_20:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_5]], %[[VAL_19]] : index +! CHECK: %[[VAL_22:.*]] = fir.do_loop %[[VAL_23:.*]] = %[[VAL_20]] to %[[VAL_21]] step %[[VAL_19]] unordered iter_args(%[[VAL_24:.*]] = %[[VAL_16]]) -> (!fir.array<10x!fir.char<1,5>>) { +! CHECK: %[[VAL_25:.*]] = fir.array_access %[[VAL_18]], %[[VAL_23]] : (!fir.array<10x!fir.char<1,5>>, index) -> !fir.ref> +! CHECK: %[[VAL_26:.*]] = fir.array_access %[[VAL_24]], %[[VAL_23]] : (!fir.array<10x!fir.char<1,5>>, index) -> !fir.ref> +! CHECK: %[[VAL_27:.*]] = arith.constant 5 : index +! CHECK: %[[VAL_28:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_27]] : (index) -> i64 +! CHECK: %[[VAL_30:.*]] = arith.muli %[[VAL_28]], %[[VAL_29]] : i64 +! CHECK: %[[VAL_31:.*]] = arith.constant false +! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_26]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_25]] : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_32]], %[[VAL_33]], %[[VAL_30]], %[[VAL_31]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: %[[VAL_34:.*]] = fir.array_amend %[[VAL_24]], %[[VAL_26]] : (!fir.array<10x!fir.char<1,5>>, !fir.ref>) -> !fir.array<10x!fir.char<1,5>> +! CHECK: fir.result %[[VAL_34]] : !fir.array<10x!fir.char<1,5>> +! CHECK: } +! CHECK: fir.array_merge_store %[[VAL_16]], %[[VAL_35:.*]] to %[[VAL_14]] : !fir.array<10x!fir.char<1,5>>, !fir.array<10x!fir.char<1,5>>, !fir.ref>> +! CHECK: omp.barrier +! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_7]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_37:.*]] = fir.emboxchar %[[VAL_36]], %[[VAL_1]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_14]] : (!fir.ref>>) -> !fir.ref> +! CHECK: %[[VAL_39:.*]] = fir.emboxchar %[[VAL_38]], %[[VAL_4]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: fir.call @_QPsub2(%[[VAL_37]], %[[VAL_39]]) : (!fir.boxchar<1>, !fir.boxchar<1>) -> () +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine copyin_char_chararray() + character(5), save :: x3, x4(10) + !$omp threadprivate(x3, x4) + + !$omp parallel copyin(x3) copyin(x4) + call sub2(x3, x4) + !$omp end parallel + +end + +! CHECK-LABEL: func.func @_QPcopyin_derived_type() { +! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QFcopyin_derived_typeEx5) : !fir.ref}>> +! CHECK: %[[VAL_1:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref}>> -> !fir.ref}>> +! CHECK: omp.parallel { +! CHECK: %[[VAL_2:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref}>> -> !fir.ref}>> +! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref}>> +! CHECK: fir.store %[[VAL_3]] to %[[VAL_2]] : !fir.ref}>> +! CHECK: omp.barrier +! CHECK: fir.call @_QPsub3(%[[VAL_2]]) : (!fir.ref}>>) -> () +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine copyin_derived_type() + type my_type + integer :: t_i + integer :: t_arr(5) + end type my_type + type(my_type), save :: x5 + !$omp threadprivate(x5) + + !$omp parallel copyin(x5) + call sub3(x5) + !$omp end parallel + +end + +! CHECK-LABEL: func.func @_QPcombined_parallel_worksharing_loop() { +! 
CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFcombined_parallel_worksharing_loopEi"} +! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QFcombined_parallel_worksharing_loopEx6) : !fir.ref +! CHECK: %[[VAL_2:.*]] = omp.threadprivate %[[VAL_1]] : !fir.ref -> !fir.ref +! CHECK: omp.parallel { +! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {adapt.valuebyref, pinned} +! CHECK: %[[VAL_4:.*]] = omp.threadprivate %[[VAL_1]] : !fir.ref -> !fir.ref +! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_2]] : !fir.ref +! CHECK: fir.store %[[VAL_5]] to %[[VAL_4]] : !fir.ref +! CHECK: omp.barrier +! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32 +! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_4]] : !fir.ref +! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 +! CHECK: omp.wsloop for (%[[VAL_9:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) { +! CHECK: fir.store %[[VAL_9]] to %[[VAL_3]] : !fir.ref +! CHECK: fir.call @_QPsub4(%[[VAL_4]]) : (!fir.ref) -> () +! CHECK: omp.yield +! CHECK: } +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine combined_parallel_worksharing_loop() + integer, save :: x6 + !$omp threadprivate(x6) + + !$omp parallel do copyin(x6) + do i=1, x6 + call sub4(x6) + end do + !$omp end parallel do + +end + +! CHECK-LABEL: func.func @_QPcombined_parallel_sections() { +! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QFcombined_parallel_sectionsEx7) : !fir.ref +! CHECK: %[[VAL_1:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref -> !fir.ref +! CHECK: omp.parallel { +! CHECK: %[[VAL_2:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref -> !fir.ref +! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: fir.store %[[VAL_3]] to %[[VAL_2]] : !fir.ref +! CHECK: omp.barrier +! CHECK: omp.sections { +! CHECK: omp.section { +! CHECK: fir.call @_QPsub5(%[[VAL_2]]) : (!fir.ref) -> () +! CHECK: omp.terminator +! CHECK: } +! CHECK: omp.section { +! CHECK: fir.call @_QPsub6(%[[VAL_2]]) : (!fir.ref) -> () +! CHECK: omp.terminator +! CHECK: } +! CHECK: omp.terminator +! CHECK: } +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine combined_parallel_sections() + integer, save :: x7 + !$omp threadprivate(x7) + + !$omp parallel sections copyin(x7) + !$omp section + call sub5(x7) + !$omp section + call sub6(x7) + !$omp end parallel sections + +end diff --git a/flang/test/Lower/array-derived-assignments.f90 b/flang/test/Lower/array-derived-assignments.f90 new file mode 100644 index 0000000000000000000000000000000000000000..f2e1105bdc25434a3a51fe62cfe959165ed10a25 --- /dev/null +++ b/flang/test/Lower/array-derived-assignments.f90 @@ -0,0 +1,104 @@ +! Test derived type assignment lowering inside array expression +! RUN: bbc %s -o - | FileCheck %s + +module array_derived_assign + type simple_copy + integer :: i + character(10) :: c(20) + real, pointer :: p(:) + end type + type deep_copy + integer :: i + real, allocatable :: a(:) + end type +contains + +! Simple copies are implemented inline component by component. +! CHECK-LABEL: func @_QMarray_derived_assignPtest_simple_copy( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>,p:!fir.box>>}>>>{{.*}}, %[[VAL_1:.*]]: !fir.ref>,p:!fir.box>>}>>>{{.*}}) { +subroutine test_simple_copy(t1, t2) + type(simple_copy) :: t1(10), t2(10) + ! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 20 : index + ! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 10 : index + ! CHECK-DAG: %[[VAL_4:.*]] = arith.constant false + ! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index + ! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index + ! 
CHECK-DAG: %[[VAL_7:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1> + ! CHECK: br ^bb1(%[[VAL_5]], %[[VAL_3]] : index, index) + ! CHECK: ^bb1(%[[VAL_8:.*]]: index, %[[VAL_9:.*]]: index): + ! CHECK: %[[VAL_10:.*]] = arith.cmpi sgt, %[[VAL_9]], %[[VAL_5]] : index + ! CHECK: cond_br %[[VAL_10]], ^bb2, ^bb6 + ! CHECK: ^bb2: + ! CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_8]], %[[VAL_6]] : index + ! CHECK: %[[VAL_12:.*]] = fir.array_coor %[[VAL_1]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>,p:!fir.box>>}>>>, !fir.shape<1>, index) -> !fir.ref>,p:!fir.box>>}>> + ! CHECK: %[[VAL_13:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>,p:!fir.box>>}>>>, !fir.shape<1>, index) -> !fir.ref>,p:!fir.box>>}>> + ! CHECK: %[[VAL_14:.*]] = fir.field_index i, !fir.type<_QMarray_derived_assignTsimple_copy{i:i32,c:!fir.array<20x!fir.char<1,10>>,p:!fir.box>>}> + ! CHECK: %[[VAL_15:.*]] = fir.coordinate_of %[[VAL_12]], %[[VAL_14]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref + ! CHECK: %[[VAL_16:.*]] = fir.coordinate_of %[[VAL_13]], %[[VAL_14]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref + ! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_15]] : !fir.ref + ! CHECK: fir.store %[[VAL_17]] to %[[VAL_16]] : !fir.ref + ! CHECK: %[[VAL_18:.*]] = fir.field_index c, !fir.type<_QMarray_derived_assignTsimple_copy{i:i32,c:!fir.array<20x!fir.char<1,10>>,p:!fir.box>>}> + ! CHECK: %[[VAL_19:.*]] = fir.coordinate_of %[[VAL_12]], %[[VAL_18]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref>> + ! CHECK: %[[VAL_20:.*]] = fir.coordinate_of %[[VAL_13]], %[[VAL_18]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref>> + ! CHECK: br ^bb3(%[[VAL_5]], %[[VAL_2]] : index, index) + ! CHECK: ^bb3(%[[VAL_21:.*]]: index, %[[VAL_22:.*]]: index): + ! CHECK: %[[VAL_23:.*]] = arith.cmpi sgt, %[[VAL_22]], %[[VAL_5]] : index + ! CHECK: cond_br %[[VAL_23]], ^bb4, ^bb5 + ! CHECK: ^bb4: + ! CHECK: %[[VAL_24:.*]] = fir.coordinate_of %[[VAL_20]], %[[VAL_21]] : (!fir.ref>>, index) -> !fir.ref> + ! CHECK: %[[VAL_25:.*]] = fir.coordinate_of %[[VAL_19]], %[[VAL_21]] : (!fir.ref>>, index) -> !fir.ref> + ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_3]] : (index) -> i64 + ! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_24]] : (!fir.ref>) -> !fir.ref + ! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_25]] : (!fir.ref>) -> !fir.ref + ! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_27]], %[[VAL_28]], %[[VAL_26]], %[[VAL_4]]) : (!fir.ref, !fir.ref, i64, i1) -> () + ! CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_21]], %[[VAL_6]] : index + ! CHECK: %[[VAL_30:.*]] = arith.subi %[[VAL_22]], %[[VAL_6]] : index + ! CHECK: br ^bb3(%[[VAL_29]], %[[VAL_30]] : index, index) + ! CHECK: ^bb5: + ! CHECK: %[[VAL_31:.*]] = fir.field_index p, !fir.type<_QMarray_derived_assignTsimple_copy{i:i32,c:!fir.array<20x!fir.char<1,10>>,p:!fir.box>>}> + ! CHECK: %[[VAL_32:.*]] = fir.coordinate_of %[[VAL_12]], %[[VAL_31]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref>>> + ! CHECK: %[[VAL_33:.*]] = fir.coordinate_of %[[VAL_13]], %[[VAL_31]] : (!fir.ref>,p:!fir.box>>}>>, !fir.field) -> !fir.ref>>> + ! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_32]] : !fir.ref>>> + ! CHECK: fir.store %[[VAL_34]] to %[[VAL_33]] : !fir.ref>>> + ! CHECK: %[[VAL_35:.*]] = arith.subi %[[VAL_9]], %[[VAL_6]] : index + ! CHECK: br ^bb1(%[[VAL_11]], %[[VAL_35]] : index, index) + ! CHECK: ^bb6: + t1 = t2 + ! CHECK: return + ! CHECK: } +end subroutine + +! Types that require more complex assignments are passed to the runtime +! CHECK-LABEL: func @_QMarray_derived_assignPtest_deep_copy( +! 
CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>}>>>{{.*}}, %[[VAL_1:.*]]: !fir.ref>>}>>>{{.*}}) { +subroutine test_deep_copy(t1, t2) + ! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 10 : index + ! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index + ! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index + ! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.box>>}>> + ! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1> + ! CHECK: br ^bb1(%[[VAL_4]], %[[VAL_3]] : index, index) + ! CHECK: ^bb1(%[[VAL_8:.*]]: index, %[[VAL_9:.*]]: index): + ! CHECK: %[[VAL_10:.*]] = arith.cmpi sgt, %[[VAL_9]], %[[VAL_4]] : index + ! CHECK: cond_br %[[VAL_10]], ^bb2, ^bb3 + ! CHECK: ^bb2: + ! CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_8]], %[[VAL_5]] : index + ! CHECK: %[[VAL_12:.*]] = fir.array_coor %[[VAL_1]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>>}>>>, !fir.shape<1>, index) -> !fir.ref>>}>> + ! CHECK: %[[VAL_13:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>>}>>>, !fir.shape<1>, index) -> !fir.ref>>}>> + ! CHECK: %[[VAL_14:.*]] = fir.embox %[[VAL_13]] : (!fir.ref>>}>>) -> !fir.box>>}>> + ! CHECK: %[[VAL_15:.*]] = fir.embox %[[VAL_12]] : (!fir.ref>>}>>) -> !fir.box>>}>> + ! CHECK: fir.store %[[VAL_14]] to %[[VAL_6]] : !fir.ref>>}>>> + ! CHECK: %[[VAL_16:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref> + ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_6]] : (!fir.ref>>}>>>) -> !fir.ref> + ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_15]] : (!fir.box>>}>>) -> !fir.box + ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_16]] : (!fir.ref>) -> !fir.ref + ! CHECK: %[[VAL_20:.*]] = fir.call @_FortranAAssign(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_9]], %[[VAL_5]] : index + ! CHECK: br ^bb1(%[[VAL_11]], %[[VAL_21]] : index, index) + type(deep_copy) :: t1(10), t2(10) + t1 = t2 + ! CHECK: return + ! CHECK: } +end subroutine + +end module diff --git a/flang/test/Lower/array-derived.f90 b/flang/test/Lower/array-derived.f90 new file mode 100644 index 0000000000000000000000000000000000000000..00df7beef9afdd7a9cb9590c6d3435c6e24ac1c5 --- /dev/null +++ b/flang/test/Lower/array-derived.f90 @@ -0,0 +1,141 @@ +! RUN: bbc %s -o - | FileCheck %s + +module cs + type r + integer n, d + end type r + + type t2 + integer :: f1(5) + type(r) :: f2 + end type t2 + + type t3 + type(t2) :: f(3,3) + end type t3 + +contains + + ! CHECK: func @_QMcsPc1( + ! CHECK-SAME: %[[arg0:[^:]+]]: !fir.box>>{{.*}}, %[[arg1:[^:]+]]: !fir.box>>{{.*}}) + function c1(e, c) + type(r), intent(in) :: e(:), c(:) + ! CHECK-DAG: fir.alloca !fir.logical<1> {bindc_name = "c1", uniq_name = "_QMcsFc1Ec1"} + logical*1 :: c1 + ! CHECK-DAG: %[[fldn:.*]] = fir.field_index n, !fir.type<_QMcsTr{n:i32,d:i32}> + ! CHECK: %[[ext1:.*]]:3 = fir.box_dims %[[arg1]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) + ! CHECK-DAG: %[[slice1:.*]] = fir.slice %c1{{.*}}, %[[ext1]]#1, %c1{{.*}} path %[[fldn]] : (index, index, index, !fir.field) -> !fir.slice<1> + ! CHECK-DAG: %[[ext0:.*]]:3 = fir.box_dims %[[arg0]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) + ! CHECK: %[[slice0:.*]] = fir.slice %c1{{.*}}, %[[ext0]]#1, %c1{{.*}} path %[[fldn]] : (index, index, index, !fir.field) -> !fir.slice<1> + ! CHECK-DAG: = fir.array_coor %[[arg1]] [%[[slice1]]] %[[index:.*]] : (!fir.box>>, !fir.slice<1>, index) -> !fir.ref + ! CHECK-DAG: = fir.array_coor %[[arg0]] [%[[slice0]]] %[[index]] : (!fir.box>>, !fir.slice<1>, index) -> !fir.ref + ! CHECK: = fir.call @_FortranAAll( + c1 = all(c%n == e%n) + end function c1 + +! 
CHECK-LABEL: func @_QMcsPtest2( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>{{.*}}, %[[VAL_1:.*]]: !fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>{{.*}}) { +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 4 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_6:.*]] = fir.field_index f2, !fir.type<_QMcsTt2{f1:!fir.array<5xi32>,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}> +! CHECK: %[[VAL_7:.*]] = fir.field_index d, !fir.type<_QMcsTr{n:i32,d:i32}> +! CHECK: %[[VAL_8:.*]]:3 = fir.box_dims %[[VAL_0]], %[[VAL_4]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, index) -> (index, index, index) +! CHECK: %[[VAL_9:.*]] = fir.slice %[[VAL_5]], %[[VAL_8]]#1, %[[VAL_5]] path %[[VAL_6]], %[[VAL_7]] : (index, index, index, !fir.field, !fir.field) -> !fir.slice<1> +! CHECK: %[[VAL_8_2:.*]] = arith.cmpi sgt, %[[VAL_8]]#1, %[[VAL_4]] : index +! CHECK: %[[VAL_8_3:.*]] = arith.select %[[VAL_8_2]], %[[VAL_8]]#1, %[[VAL_4]] : index +! CHECK: %[[VAL_10:.*]] = fir.field_index f1, !fir.type<_QMcsTt2{f1:!fir.array<5xi32>,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}> +! CHECK: %[[VAL_11:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_4]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, index) -> (index, index, index) +! CHECK: %[[VAL_12:.*]] = fir.slice %[[VAL_5]], %[[VAL_11]]#1, %[[VAL_5]] path %[[VAL_10]], %[[VAL_4]] : (index, index, index, !fir.field, index) -> !fir.slice<1> +! CHECK: %[[VAL_13:.*]] = fir.slice %[[VAL_5]], %[[VAL_11]]#1, %[[VAL_5]] path %[[VAL_10]], %[[VAL_3]] : (index, index, index, !fir.field, index) -> !fir.slice<1> +! CHECK: %[[VAL_14:.*]] = fir.slice %[[VAL_5]], %[[VAL_11]]#1, %[[VAL_5]] path %[[VAL_10]], %[[VAL_2]] : (index, index, index, !fir.field, index) -> !fir.slice<1> +! CHECK: br ^bb1(%[[VAL_4]], %[[VAL_8_3]] : index, index) +! CHECK: ^bb1(%[[VAL_15:.*]]: index, %[[VAL_16:.*]]: index): +! CHECK: %[[VAL_17:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[VAL_4]] : index +! CHECK: cond_br %[[VAL_17]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_15]], %[[VAL_5]] : index +! CHECK: %[[VAL_19:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_12]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_19]] : !fir.ref +! CHECK: %[[VAL_21:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_13]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.ref +! CHECK: %[[VAL_23:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_14]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_23]] : !fir.ref +! CHECK: %[[VAL_25:.*]] = arith.divsi %[[VAL_22]], %[[VAL_24]] : i32 +! CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_20]], %[[VAL_25]] : i32 +! CHECK: %[[VAL_27:.*]] = fir.array_coor %[[VAL_0]] {{\[}}%[[VAL_9]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_26]] to %[[VAL_27]] : !fir.ref +! CHECK: %[[VAL_28:.*]] = arith.subi %[[VAL_16]], %[[VAL_5]] : index +! CHECK: br ^bb1(%[[VAL_18]], %[[VAL_28]] : index, index) +! CHECK: ^bb3: +! CHECK: return +! CHECK: } + + + subroutine test2(a1, a2) + type(t2) :: a1(:), a2(:) + a1%f2%d = a2%f1(1) + a2%f1(5) / a2%f1(3) + end subroutine test2 + +! 
CHECK-LABEL: func @_QMcsPtest3( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>{{.*}}, %[[VAL_1:.*]]: !fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>{{.*}}) { +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 3 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 4 : i32 +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_7:.*]] = fir.field_index f, !fir.type<_QMcsTt3{f:!fir.array<3x3x!fir.type<_QMcsTt2{f1:!fir.array<5xi32>,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}> +! CHECK: %[[VAL_8:.*]] = fir.field_index f2, !fir.type<_QMcsTt2{f1:!fir.array<5xi32>,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}> +! CHECK: %[[VAL_9:.*]] = fir.field_index n, !fir.type<_QMcsTr{n:i32,d:i32}> +! CHECK: %[[VAL_10:.*]]:3 = fir.box_dims %[[VAL_0]], %[[VAL_6]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, index) -> (index, index, index) +! CHECK: %[[VAL_11:.*]] = fir.slice %[[VAL_5]], %[[VAL_10]]#1, %[[VAL_5]] path %[[VAL_7]], %[[VAL_6]], %[[VAL_6]], %[[VAL_8]], %[[VAL_9]] : (index, index, index, !fir.field, index, index, !fir.field, !fir.field) -> !fir.slice<1> +! CHECK: %[[VAL_10_2:.*]] = arith.cmpi sgt, %[[VAL_10]]#1, %[[VAL_6]] : index +! CHECK: %[[VAL_10_3:.*]] = arith.select %[[VAL_10_2]], %[[VAL_10]]#1, %[[VAL_6]] : index +! CHECK: %[[VAL_12:.*]] = fir.field_index f1, !fir.type<_QMcsTt2{f1:!fir.array<5xi32>,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}> +! CHECK: %[[VAL_13:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_6]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, index) -> (index, index, index) +! CHECK: %[[VAL_14:.*]] = fir.slice %[[VAL_5]], %[[VAL_13]]#1, %[[VAL_5]] path %[[VAL_7]], %[[VAL_5]], %[[VAL_5]], %[[VAL_12]], %[[VAL_3]] : (index, index, index, !fir.field, index, index, !fir.field, index) -> !fir.slice<1> +! CHECK: br ^bb1(%[[VAL_6]], %[[VAL_10_3]] : index, index) +! CHECK: ^bb1(%[[VAL_15:.*]]: index, %[[VAL_16:.*]]: index): +! CHECK: %[[VAL_17:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_17]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_15]], %[[VAL_5]] : index +! CHECK: %[[VAL_19:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_14]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_19]] : !fir.ref +! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_4]] : i32 +! CHECK: %[[VAL_22:.*]] = fir.array_coor %[[VAL_0]] {{\[}}%[[VAL_11]]] %[[VAL_18]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_21]] to %[[VAL_22]] : !fir.ref +! CHECK: %[[VAL_23:.*]] = arith.subi %[[VAL_16]], %[[VAL_5]] : index +! CHECK: br ^bb1(%[[VAL_18]], %[[VAL_23]] : index, index) +! CHECK: ^bb3: +! CHECK: %[[VAL_24:.*]] = fir.slice %[[VAL_5]], %[[VAL_13]]#1, %[[VAL_5]] path %[[VAL_7]], %[[VAL_2]], %[[VAL_2]], %[[VAL_12]], %[[VAL_5]] : (index, index, index, !fir.field, index, index, !fir.field, index) -> !fir.slice<1> +! CHECK: %[[VAL_13_2:.*]] = arith.cmpi sgt, %[[VAL_13]]#1, %[[VAL_6]] : index +! CHECK: %[[VAL_13_3:.*]] = arith.select %[[VAL_13_2]], %[[VAL_13]]#1, %[[VAL_6]] : index +! CHECK: %[[VAL_25:.*]] = fir.field_index d, !fir.type<_QMcsTr{n:i32,d:i32}> +! 
CHECK: %[[VAL_26:.*]] = fir.slice %[[VAL_5]], %[[VAL_10]]#1, %[[VAL_5]] path %[[VAL_7]], %[[VAL_6]], %[[VAL_5]], %[[VAL_8]], %[[VAL_25]] : (index, index, index, !fir.field, index, index, !fir.field, !fir.field) -> !fir.slice<1> +! CHECK: br ^bb4(%[[VAL_6]], %[[VAL_13_3]] : index, index) +! CHECK: ^bb4(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index): +! CHECK: %[[VAL_29:.*]] = arith.cmpi sgt, %[[VAL_28]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_29]], ^bb5, ^bb6 +! CHECK: ^bb5: +! CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_27]], %[[VAL_5]] : index +! CHECK: %[[VAL_31:.*]] = fir.array_coor %[[VAL_0]] {{\[}}%[[VAL_26]]] %[[VAL_30]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_31]] : !fir.ref +! CHECK: %[[VAL_33:.*]] = arith.addi %[[VAL_32]], %[[VAL_4]] : i32 +! CHECK: %[[VAL_34:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_24]]] %[[VAL_30]] : (!fir.box,f2:!fir.type<_QMcsTr{n:i32,d:i32}>}>>}>>>, !fir.slice<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_33]] to %[[VAL_34]] : !fir.ref +! CHECK: %[[VAL_35:.*]] = arith.subi %[[VAL_28]], %[[VAL_5]] : index +! CHECK: br ^bb4(%[[VAL_30]], %[[VAL_35]] : index, index) +! CHECK: ^bb6: +! CHECK: return +! CHECK: } + + subroutine test3(a3, a4) + type(t3) :: a3(:), a4(:) + a3%f(1,1)%f2%n = a4%f(2,2)%f1(4) - 4 + a4%f(3,3)%f1(2) = a3%f(1,2)%f2%d + 4 + end subroutine test3 +end module cs + +! CHECK: func private @_FortranAAll(!fir.box, !fir.ref, i32, i32) -> i1 attributes {fir.runtime} diff --git a/flang/test/Lower/array-elemental-calls-char-byval.f90 b/flang/test/Lower/array-elemental-calls-char-byval.f90 new file mode 100644 index 0000000000000000000000000000000000000000..4f5c020790449c4b3700294a5888610d61037c7f --- /dev/null +++ b/flang/test/Lower/array-elemental-calls-char-byval.f90 @@ -0,0 +1,198 @@ +! Test lowering of elemental calls with character argument +! with the VALUE attribute. +! RUN: bbc -o - %s | FileCheck %s + + +module char_elem_byval + +interface +elemental integer function elem(c, j) + character(*), value :: c + integer, intent(in) :: j +end function +end interface + +contains +! CHECK-LABEL: func @_QMchar_elem_byvalPfoo1( +! CHECK-SAME: %[[VAL_22:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_19:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_5:.*]]: !fir.boxchar<1>{{.*}}) { +subroutine foo1(i, j, c) + integer :: i(10), j(10) + character(*) :: c(10) +! CHECK-DAG: %[[VAL_0:.*]] = arith.constant false +! CHECK-DAG: %[[VAL_1:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_4:.*]]:2 = fir.unboxchar %[[VAL_5]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_4]]#0 : (!fir.ref>) -> !fir.ref>> +! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> +! CHECK: br ^bb1(%[[VAL_2]], %[[VAL_1]] : index, index) +! CHECK: ^bb1(%[[VAL_8:.*]]: index, %[[VAL_9:.*]]: index): +! CHECK: %[[VAL_10:.*]] = arith.cmpi sgt, %[[VAL_9]], %[[VAL_2]] : index +! CHECK: cond_br %[[VAL_10]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_8]], %[[VAL_3]] : index +! CHECK: %[[VAL_12:.*]] = fir.array_coor %[[VAL_6]](%[[VAL_7]]) %[[VAL_11]] typeparams %[[VAL_4]]#1 : (!fir.ref>>, !fir.shape<1>, index, index) -> !fir.ref> +! CHECK: %[[VAL_13:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_4]]#1 : index) {bindc_name = ".chrtmp"} +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_4]]#1 : (index) -> i64 +! 
CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]], %[[VAL_0]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: %[[VAL_17:.*]] = fir.emboxchar %[[VAL_13]], %[[VAL_4]]#1 : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_18:.*]] = fir.array_coor %[[VAL_19]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_20:.*]] = fir.call @_QPelem(%[[VAL_17]], %[[VAL_18]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_21:.*]] = fir.array_coor %[[VAL_22]](%[[VAL_7]]) %[[VAL_11]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_20]] to %[[VAL_21]] : !fir.ref +! CHECK: %[[VAL_23:.*]] = arith.subi %[[VAL_9]], %[[VAL_3]] : index +! CHECK: br ^bb1(%[[VAL_11]], %[[VAL_23]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem(c, j) +end subroutine + +! CHECK-LABEL: func @_QMchar_elem_byvalPfoo2( +! CHECK-SAME: %[[VAL_44:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_41:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_29:.*]]: !fir.boxchar<1>{{.*}}) { +subroutine foo2(i, j, c) + integer :: i(10), j(10) + character(*) :: c +! CHECK-DAG: %[[VAL_24:.*]] = arith.constant false +! CHECK-DAG: %[[VAL_25:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_26:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_27:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_28:.*]]:2 = fir.unboxchar %[[VAL_29]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_30:.*]] = fir.shape %[[VAL_25]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_31:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_28]]#1 : index) {bindc_name = ".chrtmp"} +! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_28]]#1 : (index) -> i64 +! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_31]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_28]]#0 : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_33]], %[[VAL_34]], %[[VAL_32]], %[[VAL_24]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: br ^bb1(%[[VAL_26]], %[[VAL_25]] : index, index) +! CHECK: ^bb1(%[[VAL_35:.*]]: index, %[[VAL_36:.*]]: index): +! CHECK: %[[VAL_37:.*]] = arith.cmpi sgt, %[[VAL_36]], %[[VAL_26]] : index +! CHECK: cond_br %[[VAL_37]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_38:.*]] = fir.emboxchar %[[VAL_31]], %[[VAL_28]]#1 : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_39:.*]] = arith.addi %[[VAL_35]], %[[VAL_27]] : index +! CHECK: %[[VAL_40:.*]] = fir.array_coor %[[VAL_41]](%[[VAL_30]]) %[[VAL_39]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_42:.*]] = fir.call @_QPelem(%[[VAL_38]], %[[VAL_40]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_43:.*]] = fir.array_coor %[[VAL_44]](%[[VAL_30]]) %[[VAL_39]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_42]] to %[[VAL_43]] : !fir.ref +! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_36]], %[[VAL_27]] : index +! CHECK: br ^bb1(%[[VAL_39]], %[[VAL_45]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem(c, j) +end subroutine + +! CHECK-LABEL: func @_QMchar_elem_byvalPfoo3( +! CHECK-SAME: %[[VAL_65:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_55:[^:]+]]: !fir.ref>{{.*}}) { +subroutine foo3(i, j) + integer :: i(10), j(10) +! CHECK-DAG: %[[VAL_46:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_47:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_48:.*]] = arith.constant 1 : index +! 
CHECK: %[[VAL_49:.*]] = fir.shape %[[VAL_46]] : (index) -> !fir.shape<1> +! CHECK: br ^bb1(%[[VAL_47]], %[[VAL_46]] : index, index) +! CHECK: ^bb1(%[[VAL_50:.*]]: index, %[[VAL_51:.*]]: index): +! CHECK: %[[VAL_52:.*]] = arith.cmpi sgt, %[[VAL_51]], %[[VAL_47]] : index +! CHECK: cond_br %[[VAL_52]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_50]], %[[VAL_48]] : index +! CHECK: %[[VAL_54:.*]] = fir.array_coor %[[VAL_55]](%[[VAL_49]]) %[[VAL_53]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_56:.*]] = fir.load %[[VAL_54]] : !fir.ref +! CHECK: %[[VAL_57:.*]] = fir.convert %[[VAL_56]] : (i32) -> i8 +! CHECK: %[[VAL_58:.*]] = fir.undefined !fir.char<1> +! CHECK: %[[VAL_59:.*]] = fir.insert_value %[[VAL_58]], %[[VAL_57]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> +! CHECK: %[[VAL_60:.*]] = fir.alloca !fir.char<1> {bindc_name = ".chrtmp"} +! CHECK: fir.store %[[VAL_59]] to %[[VAL_60]] : !fir.ref> +! CHECK: %[[VAL_61:.*]] = fir.convert %[[VAL_60]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_62:.*]] = fir.emboxchar %[[VAL_61]], %[[VAL_48]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_63:.*]] = fir.call @_QPelem(%[[VAL_62]], %[[VAL_54]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_64:.*]] = fir.array_coor %[[VAL_65]](%[[VAL_49]]) %[[VAL_53]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_63]] to %[[VAL_64]] : !fir.ref +! CHECK: %[[VAL_66:.*]] = arith.subi %[[VAL_51]], %[[VAL_48]] : index +! CHECK: br ^bb1(%[[VAL_53]], %[[VAL_66]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem(char(j), j) +end subroutine + +! CHECK-LABEL: func @_QMchar_elem_byvalPfoo4( +! CHECK-SAME: %[[VAL_93:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_74:[^:]+]]: !fir.ref>{{.*}}) { +subroutine foo4(i, j) + integer :: i(10), j(10) +! CHECK-DAG: %[[VAL_67:.*]] = arith.constant 0 : i64 +! CHECK-DAG: %[[VAL_68:.*]] = arith.constant false +! CHECK-DAG: %[[VAL_69:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_70:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_71:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_66:.*]] = fir.alloca !fir.char<1> {adapt.valuebyref} +! CHECK: %[[VAL_72:.*]] = fir.shape %[[VAL_69]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_73:.*]] = fir.coordinate_of %[[VAL_74]], %[[VAL_67]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_75:.*]] = fir.load %[[VAL_73]] : !fir.ref +! CHECK: %[[VAL_76:.*]] = fir.convert %[[VAL_75]] : (i32) -> i8 +! CHECK: %[[VAL_77:.*]] = fir.undefined !fir.char<1> +! CHECK: %[[VAL_78:.*]] = fir.insert_value %[[VAL_77]], %[[VAL_76]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> +! CHECK: fir.store %[[VAL_78]] to %[[VAL_66]] : !fir.ref> +! CHECK: %[[VAL_80:.*]] = fir.alloca !fir.char<1> {bindc_name = ".chrtmp"} +! CHECK: %[[VAL_81:.*]] = fir.convert %[[VAL_71]] : (index) -> i64 +! CHECK: %[[VAL_82:.*]] = fir.convert %[[VAL_80]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_83:.*]] = fir.convert %[[VAL_66]] : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_82]], %[[VAL_83]], %[[VAL_81]], %[[VAL_68]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: br ^bb1(%[[VAL_70]], %[[VAL_69]] : index, index) +! CHECK: ^bb1(%[[VAL_84:.*]]: index, %[[VAL_85:.*]]: index): +! CHECK: %[[VAL_86:.*]] = arith.cmpi sgt, %[[VAL_85]], %[[VAL_70]] : index +! CHECK: cond_br %[[VAL_86]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_87:.*]] = fir.convert %[[VAL_80]] : (!fir.ref>) -> !fir.ref> +! 
CHECK: %[[VAL_88:.*]] = fir.emboxchar %[[VAL_87]], %[[VAL_71]] : (!fir.ref>, index) -> !fir.boxchar<1>
+! CHECK: %[[VAL_89:.*]] = arith.addi %[[VAL_84]], %[[VAL_71]] : index
+! CHECK: %[[VAL_90:.*]] = fir.array_coor %[[VAL_74]](%[[VAL_72]]) %[[VAL_89]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: %[[VAL_91:.*]] = fir.call @_QPelem(%[[VAL_88]], %[[VAL_90]]) : (!fir.boxchar<1>, !fir.ref) -> i32
+! CHECK: %[[VAL_92:.*]] = fir.array_coor %[[VAL_93]](%[[VAL_72]]) %[[VAL_89]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: fir.store %[[VAL_91]] to %[[VAL_92]] : !fir.ref
+! CHECK: %[[VAL_94:.*]] = arith.subi %[[VAL_85]], %[[VAL_71]] : index
+! CHECK: br ^bb1(%[[VAL_89]], %[[VAL_94]] : index, index)
+! CHECK: ^bb3:
+! CHECK: return
+  i = elem(char(j(1)), j)
+end subroutine
+
+! Note: the copy of the constant is important because a VALUE dummy argument
+! may legally be modified by the callee, so the caller must pass a modifiable
+! temporary rather than the constant itself.
+
+! CHECK-LABEL: func @_QMchar_elem_byvalPfoo5(
+! CHECK-SAME: %[[VAL_116:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_113:[^:]+]]: !fir.ref>{{.*}}) {
+subroutine foo5(i, j)
+  integer :: i(10), j(10)
+! CHECK-DAG: %[[VAL_95:.*]] = arith.constant 5 : index
+! CHECK-DAG: %[[VAL_96:.*]] = arith.constant false
+! CHECK-DAG: %[[VAL_97:.*]] = arith.constant 10 : index
+! CHECK-DAG: %[[VAL_98:.*]] = arith.constant 0 : index
+! CHECK-DAG: %[[VAL_99:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_100:.*]] = fir.shape %[[VAL_97]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_101:.*]] = fir.address_of(@{{.*}}) : !fir.ref>
+! CHECK: %[[VAL_102:.*]] = fir.alloca !fir.char<1,5> {bindc_name = ".chrtmp"}
+! CHECK: %[[VAL_103:.*]] = fir.convert %[[VAL_95]] : (index) -> i64
+! CHECK: %[[VAL_104:.*]] = fir.convert %[[VAL_102]] : (!fir.ref>) -> !fir.ref
+! CHECK: %[[VAL_105:.*]] = fir.convert %[[VAL_101]] : (!fir.ref>) -> !fir.ref
+! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_104]], %[[VAL_105]], %[[VAL_103]], %[[VAL_96]]) : (!fir.ref, !fir.ref, i64, i1) -> ()
+! CHECK: br ^bb1(%[[VAL_98]], %[[VAL_97]] : index, index)
+! CHECK: ^bb1(%[[VAL_106:.*]]: index, %[[VAL_107:.*]]: index):
+! CHECK: %[[VAL_108:.*]] = arith.cmpi sgt, %[[VAL_107]], %[[VAL_98]] : index
+! CHECK: cond_br %[[VAL_108]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_109:.*]] = fir.convert %[[VAL_102]] : (!fir.ref>) -> !fir.ref>
+! CHECK: %[[VAL_110:.*]] = fir.emboxchar %[[VAL_109]], %[[VAL_95]] : (!fir.ref>, index) -> !fir.boxchar<1>
+! CHECK: %[[VAL_111:.*]] = arith.addi %[[VAL_106]], %[[VAL_99]] : index
+! CHECK: %[[VAL_112:.*]] = fir.array_coor %[[VAL_113]](%[[VAL_100]]) %[[VAL_111]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: %[[VAL_114:.*]] = fir.call @_QPelem(%[[VAL_110]], %[[VAL_112]]) : (!fir.boxchar<1>, !fir.ref) -> i32
+! CHECK: %[[VAL_115:.*]] = fir.array_coor %[[VAL_116]](%[[VAL_100]]) %[[VAL_111]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: fir.store %[[VAL_114]] to %[[VAL_115]] : !fir.ref
+! CHECK: %[[VAL_117:.*]] = arith.subi %[[VAL_107]], %[[VAL_99]] : index
+! CHECK: br ^bb1(%[[VAL_111]], %[[VAL_117]] : index, index)
+! CHECK: ^bb3:
+! CHECK: return
+  i = elem("hello", j)
+end subroutine
+
+end module
diff --git a/flang/test/Lower/array-elemental-calls-char.f90 b/flang/test/Lower/array-elemental-calls-char.f90
new file mode 100644
index 0000000000000000000000000000000000000000..e695f9bb8b89bb2328321f718bde4d9fe806e468
--- /dev/null
+++ b/flang/test/Lower/array-elemental-calls-char.f90
@@ -0,0 +1,267 @@
+! Test lowering of elemental calls with character argument
+! without the VALUE attribute.
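+! Note: conceptually, an elemental call over a character array, e.g.
+!   integer :: i(10)
+!   character(*) :: c(10)
+!   i = elem(c)
+! behaves like a scalar loop in which each element is passed along with its
+! length, roughly (illustration only, not checked by this test):
+!   do k = 1, 10
+!     i(k) = elem(c(k))   ! c(k) is boxed as an (address, length) pair
+!   end do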
+! RUN: bbc -o - %s | FileCheck %s
+
+module char_elem
+
+interface
+elemental integer function elem(c)
+  character(*), intent(in) :: c
+end function
+
+elemental integer function elem2(c, j)
+  character(*), intent(in) :: c
+  integer, intent(in) :: j
+end function
+
+end interface
+
+contains
+
+! CHECK-LABEL: func @_QMchar_elemPfoo1(
+! CHECK-SAME: %[[VAL_15:.*]]: !fir.ref>{{.*}}, %[[VAL_4:.*]]: !fir.boxchar<1>{{.*}}) {
+subroutine foo1(i, c)
+  integer :: i(10)
+  character(*) :: c(10)
+! CHECK-DAG: %[[VAL_0:.*]] = arith.constant 10 : index
+! CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
+! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_3:.*]]:2 = fir.unboxchar %[[VAL_4]] : (!fir.boxchar<1>) -> (!fir.ref>, index)
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref>) -> !fir.ref>>
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_0]] : (index) -> !fir.shape<1>
+! CHECK: br ^bb1(%[[VAL_1]], %[[VAL_0]] : index, index)
+! CHECK: ^bb1(%[[VAL_7:.*]]: index, %[[VAL_8:.*]]: index):
+! CHECK: %[[VAL_9:.*]] = arith.cmpi sgt, %[[VAL_8]], %[[VAL_1]] : index
+! CHECK: cond_br %[[VAL_9]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_10:.*]] = arith.addi %[[VAL_7]], %[[VAL_2]] : index
+! CHECK: %[[VAL_11:.*]] = fir.array_coor %[[VAL_5]](%[[VAL_6]]) %[[VAL_10]] typeparams %[[VAL_3]]#1 : (!fir.ref>>, !fir.shape<1>, index, index) -> !fir.ref>
+! CHECK: %[[VAL_12:.*]] = fir.emboxchar %[[VAL_11]], %[[VAL_3]]#1 : (!fir.ref>, index) -> !fir.boxchar<1>
+! CHECK: %[[VAL_13:.*]] = fir.call @_QPelem(%[[VAL_12]]) : (!fir.boxchar<1>) -> i32
+! CHECK: %[[VAL_14:.*]] = fir.array_coor %[[VAL_15]](%[[VAL_6]]) %[[VAL_10]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_14]] : !fir.ref
+! CHECK: %[[VAL_16:.*]] = arith.subi %[[VAL_8]], %[[VAL_2]] : index
+! CHECK: br ^bb1(%[[VAL_10]], %[[VAL_16]] : index, index)
+! CHECK: ^bb3:
+! CHECK: return
+  i = elem(c)
+end subroutine
+
+! CHECK-LABEL: func @_QMchar_elemPfoo1b(
+! CHECK-SAME: %[[VAL_33:.*]]: !fir.ref>{{.*}}, %[[VAL_21:.*]]: !fir.boxchar<1>{{.*}}) {
+subroutine foo1b(i, c)
+  integer :: i(10)
+  character(10) :: c(10)
+! CHECK-DAG: %[[VAL_17:.*]] = arith.constant 10 : index
+! CHECK-DAG: %[[VAL_18:.*]] = arith.constant 0 : index
+! CHECK-DAG: %[[VAL_19:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_20:.*]]:2 = fir.unboxchar %[[VAL_21]] : (!fir.boxchar<1>) -> (!fir.ref>, index)
+! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_20]]#0 : (!fir.ref>) -> !fir.ref>>
+! CHECK: %[[VAL_23:.*]] = fir.shape %[[VAL_17]] : (index) -> !fir.shape<1>
+! CHECK: br ^bb1(%[[VAL_18]], %[[VAL_17]] : index, index)
+! CHECK: ^bb1(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index):
+! CHECK: %[[VAL_26:.*]] = arith.cmpi sgt, %[[VAL_25]], %[[VAL_18]] : index
+! CHECK: cond_br %[[VAL_26]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_24]], %[[VAL_19]] : index
+! CHECK: %[[VAL_28:.*]] = fir.array_coor %[[VAL_22]](%[[VAL_23]]) %[[VAL_27]] : (!fir.ref>>, !fir.shape<1>, index) -> !fir.ref>
+! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (!fir.ref>) -> !fir.ref>
+! CHECK: %[[VAL_30:.*]] = fir.emboxchar %[[VAL_29]], %[[VAL_17]] : (!fir.ref>, index) -> !fir.boxchar<1>
+! CHECK: %[[VAL_31:.*]] = fir.call @_QPelem(%[[VAL_30]]) : (!fir.boxchar<1>) -> i32
+! CHECK: %[[VAL_32:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_23]]) %[[VAL_27]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref
+! CHECK: fir.store %[[VAL_31]] to %[[VAL_32]] : !fir.ref
+! CHECK: %[[VAL_34:.*]] = arith.subi %[[VAL_25]], %[[VAL_19]] : index
+!
CHECK: br ^bb1(%[[VAL_27]], %[[VAL_34]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem(c) +end subroutine + +! CHECK-LABEL: func @_QMchar_elemPfoo2( +! CHECK-SAME: %[[VAL_50:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_47:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_39:.*]]: !fir.boxchar<1>{{.*}}) { +subroutine foo2(i, j, c) +! CHECK-DAG: %[[VAL_35:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_36:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_37:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_38:.*]]:2 = fir.unboxchar %[[VAL_39]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_40:.*]] = fir.shape %[[VAL_35]] : (index) -> !fir.shape<1> +! CHECK: br ^bb1(%[[VAL_36]], %[[VAL_35]] : index, index) +! CHECK: ^bb1(%[[VAL_41:.*]]: index, %[[VAL_42:.*]]: index): +! CHECK: %[[VAL_43:.*]] = arith.cmpi sgt, %[[VAL_42]], %[[VAL_36]] : index +! CHECK: cond_br %[[VAL_43]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_44:.*]] = fir.emboxchar %[[VAL_38]]#0, %[[VAL_38]]#1 : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_41]], %[[VAL_37]] : index +! CHECK: %[[VAL_46:.*]] = fir.array_coor %[[VAL_47]](%[[VAL_40]]) %[[VAL_45]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_48:.*]] = fir.call @_QPelem2(%[[VAL_44]], %[[VAL_46]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_49:.*]] = fir.array_coor %[[VAL_50]](%[[VAL_40]]) %[[VAL_45]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_48]] to %[[VAL_49]] : !fir.ref +! CHECK: %[[VAL_51:.*]] = arith.subi %[[VAL_42]], %[[VAL_37]] : index +! CHECK: br ^bb1(%[[VAL_45]], %[[VAL_51]] : index, index) +! CHECK: ^bb3: +! CHECK: return + integer :: i(10), j(10) + character(*) :: c + i = elem2(c, j) +end subroutine + +! CHECK-LABEL: func @_QMchar_elemPfoo2b( +! CHECK-SAME: %[[VAL_67:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_64:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_56:.*]]: !fir.boxchar<1>{{.*}}) { +subroutine foo2b(i, j, c) + integer :: i(10), j(10) + character(10) :: c +! CHECK-DAG: %[[VAL_52:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_53:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_54:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_55:.*]]:2 = fir.unboxchar %[[VAL_56]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_57:.*]] = fir.shape %[[VAL_52]] : (index) -> !fir.shape<1> +! CHECK: br ^bb1(%[[VAL_53]], %[[VAL_52]] : index, index) +! CHECK: ^bb1(%[[VAL_58:.*]]: index, %[[VAL_59:.*]]: index): +! CHECK: %[[VAL_60:.*]] = arith.cmpi sgt, %[[VAL_59]], %[[VAL_53]] : index +! CHECK: cond_br %[[VAL_60]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_61:.*]] = fir.emboxchar %[[VAL_55]]#0, %[[VAL_52]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_62:.*]] = arith.addi %[[VAL_58]], %[[VAL_54]] : index +! CHECK: %[[VAL_63:.*]] = fir.array_coor %[[VAL_64]](%[[VAL_57]]) %[[VAL_62]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_65:.*]] = fir.call @_QPelem2(%[[VAL_61]], %[[VAL_63]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_66:.*]] = fir.array_coor %[[VAL_67]](%[[VAL_57]]) %[[VAL_62]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_65]] to %[[VAL_66]] : !fir.ref +! CHECK: %[[VAL_68:.*]] = arith.subi %[[VAL_59]], %[[VAL_54]] : index +! CHECK: br ^bb1(%[[VAL_62]], %[[VAL_68]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem2(c, j) +end subroutine + +! CHECK-LABEL: func @_QMchar_elemPfoo3( +! 
CHECK-SAME: %[[VAL_88:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_79:[^:]+]]: !fir.ref>{{.*}}) +subroutine foo3(i, j) + integer :: i(10), j(10) +! CHECK-DAG: %[[VAL_69:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_70:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_71:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_72:.*]] = fir.alloca !fir.char<1> +! CHECK: %[[VAL_73:.*]] = fir.shape %[[VAL_69]] : (index) -> !fir.shape<1> +! CHECK: br ^bb1(%[[VAL_70]], %[[VAL_69]] : index, index) +! CHECK: ^bb1(%[[VAL_74:.*]]: index, %[[VAL_75:.*]]: index): +! CHECK: %[[VAL_76:.*]] = arith.cmpi sgt, %[[VAL_75]], %[[VAL_70]] : index +! CHECK: cond_br %[[VAL_76]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_77:.*]] = arith.addi %[[VAL_74]], %[[VAL_71]] : index +! CHECK: %[[VAL_78:.*]] = fir.array_coor %[[VAL_79]](%[[VAL_73]]) %[[VAL_77]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_80:.*]] = fir.load %[[VAL_78]] : !fir.ref +! CHECK: %[[VAL_81:.*]] = fir.convert %[[VAL_80]] : (i32) -> i8 +! CHECK: %[[VAL_82:.*]] = fir.undefined !fir.char<1> +! CHECK: %[[VAL_83:.*]] = fir.insert_value %[[VAL_82]], %[[VAL_81]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> +! CHECK: fir.store %[[VAL_83]] to %[[VAL_72]] : !fir.ref> +! CHECK: %[[VAL_84:.*]] = fir.convert %[[VAL_72]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_85:.*]] = fir.emboxchar %[[VAL_84]], %[[VAL_71]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_86:.*]] = fir.call @_QPelem(%[[VAL_85]]) : (!fir.boxchar<1>) -> i32 +! CHECK: %[[VAL_87:.*]] = fir.array_coor %[[VAL_88]](%[[VAL_73]]) %[[VAL_77]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_86]] to %[[VAL_87]] : !fir.ref +! CHECK: %[[VAL_89:.*]] = arith.subi %[[VAL_75]], %[[VAL_71]] : index +! CHECK: br ^bb1(%[[VAL_77]], %[[VAL_89]] : index, index) +! CHECK: ^bb3: +! CHECK: return + i = elem(char(j)) +end subroutine + +! CHECK-LABEL: func @_QMchar_elemPfoo4( +! CHECK-SAME: %[[VAL_106:[^:]+]]: !fir.ref>{{.*}}, %[[VAL_103:[^:]+]]: !fir.ref>{{.*}}) +subroutine foo4(i, j) + integer :: i(10), j(10) +! CHECK-DAG: %[[VAL_90:.*]] = arith.constant 5 : index +! CHECK-DAG: %[[VAL_91:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_92:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_93:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_94:.*]] = fir.shape %[[VAL_91]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_95:.*]] = fir.address_of(@{{.*}}) : !fir.ref> +! CHECK: br ^bb1(%[[VAL_92]], %[[VAL_91]] : index, index) +! CHECK: ^bb1(%[[VAL_96:.*]]: index, %[[VAL_97:.*]]: index): +! CHECK: %[[VAL_98:.*]] = arith.cmpi sgt, %[[VAL_97]], %[[VAL_92]] : index +! CHECK: cond_br %[[VAL_98]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_99:.*]] = fir.convert %[[VAL_95]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_100:.*]] = fir.emboxchar %[[VAL_99]], %[[VAL_90]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: %[[VAL_101:.*]] = arith.addi %[[VAL_96]], %[[VAL_93]] : index +! CHECK: %[[VAL_102:.*]] = fir.array_coor %[[VAL_103]](%[[VAL_94]]) %[[VAL_101]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_104:.*]] = fir.call @_QPelem2(%[[VAL_100]], %[[VAL_102]]) : (!fir.boxchar<1>, !fir.ref) -> i32 +! CHECK: %[[VAL_105:.*]] = fir.array_coor %[[VAL_106]](%[[VAL_94]]) %[[VAL_101]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_104]] to %[[VAL_105]] : !fir.ref +! CHECK: %[[VAL_107:.*]] = arith.subi %[[VAL_97]], %[[VAL_93]] : index +! CHECK: br ^bb1(%[[VAL_101]], %[[VAL_107]] : index, index) +! CHECK: ^bb3: +! 
CHECK: return + i = elem2("hello", j) +end subroutine + +! Test character return for elemental functions. + +! CHECK-LABEL: func @_QMchar_elemPelem_return_char( +! CHECK-SAME: %{{.*}}: !fir.ref>{{.*}}, %{{.*}}: index{{.*}}, %{{.*}}: !fir.boxchar<1>{{.*}}) -> !fir.boxchar<1> +elemental function elem_return_char(c) + character(*), intent(in) :: c + character(len(c)) :: elem_return_char + elem_return_char = "ab" // c +end function + +! CHECK-LABEL: func @_QMchar_elemPfoo6( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxchar<1> {fir.bindc_name = "c"}) { +subroutine foo6(c) + ! CHECK-DAG: %[[VAL_1:.*]] = arith.constant 10 : index + ! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index + ! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index + ! CHECK-DAG: %[[VAL_4:.*]] = arith.constant false + ! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 32 : i8 + ! CHECK: %[[VAL_6:.*]]:2 = fir.unboxchar %[[VAL_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) + ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]]#0 : (!fir.ref>) -> !fir.ref>> + ! CHECK: %[[VAL_8:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> + ! CHECK: br ^bb1(%[[VAL_3]], %[[VAL_1]] : index, index) + ! CHECK: ^bb1(%[[VAL_9:.*]]: index, %[[VAL_10:.*]]: index): + ! CHECK: %[[VAL_11:.*]] = arith.cmpi sgt, %[[VAL_10]], %[[VAL_3]] : index + ! CHECK: cond_br %[[VAL_11]], ^bb2, ^bb6 + ! CHECK: ^bb2: + ! CHECK: %[[VAL_12:.*]] = arith.addi %[[VAL_9]], %[[VAL_2]] : index + ! CHECK: %[[VAL_13:.*]] = fir.array_coor %[[VAL_7]](%[[VAL_8]]) %[[VAL_12]] typeparams %[[VAL_6]]#1 : (!fir.ref>>, !fir.shape<1>, index, index) -> !fir.ref> + ! CHECK: %[[VAL_14:.*]] = fir.emboxchar %[[VAL_13]], %[[VAL_6]]#1 : (!fir.ref>, index) -> !fir.boxchar<1> + ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_6]]#1 : (index) -> i32 + ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> index + ! CHECK: %[[VAL_17:.*]] = fir.call @llvm.stacksave() : () -> !fir.ref + ! CHECK: %[[VAL_18:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_16]] : index) {bindc_name = ".result"} + ! CHECK: %[[VAL_19:.*]] = fir.call @_QMchar_elemPelem_return_char(%[[VAL_18]], %[[VAL_16]], %[[VAL_14]]) : (!fir.ref>, index, !fir.boxchar<1>) -> !fir.boxchar<1> + ! CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_6]]#1, %[[VAL_16]] : index + ! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_6]]#1, %[[VAL_16]] : index + ! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_21]] : (index) -> i64 + ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref + ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_18]] : (!fir.ref>) -> !fir.ref + ! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_23]], %[[VAL_24]], %[[VAL_22]], %[[VAL_4]]) : (!fir.ref, !fir.ref, i64, i1) -> () + ! CHECK: %[[VAL_25:.*]] = arith.subi %[[VAL_6]]#1, %[[VAL_2]] : index + ! CHECK: %[[VAL_26:.*]] = fir.undefined !fir.char<1> + ! CHECK: %[[VAL_27:.*]] = fir.insert_value %[[VAL_26]], %[[VAL_5]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> + ! CHECK: %[[VAL_28:.*]] = arith.subi %[[VAL_25]], %[[VAL_21]] : index + ! CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_28]], %[[VAL_2]] : index + ! CHECK: br ^bb3(%[[VAL_21]], %[[VAL_29]] : index, index) + ! CHECK: ^bb3(%[[VAL_30:.*]]: index, %[[VAL_31:.*]]: index): + ! CHECK: %[[VAL_32:.*]] = arith.cmpi sgt, %[[VAL_31]], %[[VAL_3]] : index + ! CHECK: cond_br %[[VAL_32]], ^bb4, ^bb5 + ! CHECK: ^bb4: + ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref>> + ! CHECK: %[[VAL_34:.*]] = fir.coordinate_of %[[VAL_33]], %[[VAL_30]] : (!fir.ref>>, index) -> !fir.ref> + ! 
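+  ! Note: conceptually, each iteration evaluates the character-valued
+  ! elemental function into a stack temporary of runtime-dependent length,
+  ! then copies and blank-pads it into the left-hand side element, roughly
+  ! (illustration only, not checked by this test):
+  !   do k = 1, 10
+  !     c(k) = elem_return_char(c(k))   ! temp alloca, memmove, blank fill
+  !   end do
+  ! The llvm.stacksave/llvm.stackrestore pair in these checks confines the
+  ! temporary's lifetime to a single iteration.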
CHECK: fir.store %[[VAL_27]] to %[[VAL_34]] : !fir.ref> + ! CHECK: %[[VAL_35:.*]] = arith.addi %[[VAL_30]], %[[VAL_2]] : index + ! CHECK: %[[VAL_36:.*]] = arith.subi %[[VAL_31]], %[[VAL_2]] : index + ! CHECK: br ^bb3(%[[VAL_35]], %[[VAL_36]] : index, index) + ! CHECK: ^bb5: + ! CHECK: fir.call @llvm.stackrestore(%[[VAL_17]]) : (!fir.ref) -> () + ! CHECK: %[[VAL_37:.*]] = arith.subi %[[VAL_10]], %[[VAL_2]] : index + ! CHECK: br ^bb1(%[[VAL_12]], %[[VAL_37]] : index, index) + ! CHECK: ^bb6: + + implicit none + character(*) :: c(10) + c = elem_return_char(c) + ! CHECK: return + ! CHECK: } +end subroutine + +end module diff --git a/flang/test/Lower/array-elemental-subroutines.f90 b/flang/test/Lower/array-elemental-subroutines.f90 new file mode 100644 index 0000000000000000000000000000000000000000..652e614fa9fabf9073b5c822d16fb4abf1b7e02a --- /dev/null +++ b/flang/test/Lower/array-elemental-subroutines.f90 @@ -0,0 +1,64 @@ +! Test lowering of elemental subroutine calls with array arguments +! RUN: bbc -o - -emit-fir %s | FileCheck %s + +! CHECK-LABEL: func @_QPtest_elem_sub( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.box>{{.*}}, %[[VAL_1:.*]]: !fir.box>>{{.*}}, %[[VAL_2:.*]]: !fir.ref{{.*}}, %[[VAL_3:.*]]: !fir.ref>{{.*}}) { +! CHECK: %[[VAL_4:.*]] = fir.alloca !fir.complex<4> {adapt.valuebyref} +! CHECK: %[[VAL_5:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_6:.*]]:3 = fir.box_dims %[[VAL_0]], %[[VAL_5]] : (!fir.box>, index) -> (index, index, index) +! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i64 +! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]] : (i64) -> index +! CHECK: %[[VAL_9:.*]] = arith.constant -1 : i64 +! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i64) -> index +! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i64) -> index +! CHECK: %[[VAL_13:.*]] = fir.slice %[[VAL_8]], %[[VAL_12]], %[[VAL_10]] : (index, index, index) -> !fir.slice<1> +! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_3]] : !fir.ref> +! CHECK: fir.store %[[VAL_14]] to %[[VAL_4]] : !fir.ref> +! CHECK: %[[VAL_15:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_16:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_6]]#1, %[[VAL_15]] : index +! CHECK: fir.do_loop %[[VAL_18:.*]] = %[[VAL_16]] to %[[VAL_17]] step %[[VAL_15]] { +! CHECK: %[[VAL_19:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_18]], %[[VAL_19]] : index +! CHECK: %[[VAL_21:.*]] = fir.array_coor %[[VAL_0]] %[[VAL_20]] : (!fir.box>, index) -> !fir.ref +! CHECK: %[[VAL_22:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_18]], %[[VAL_22]] : index +! CHECK: %[[VAL_24:.*]] = fir.array_coor %[[VAL_1]] {{\[}}%[[VAL_13]]] %[[VAL_23]] : (!fir.box>>, !fir.slice<1>, index) -> !fir.ref> +! CHECK: %[[VAL_25:.*]] = fir.box_elesize %[[VAL_1]] : (!fir.box>>) -> index +! CHECK: %[[VAL_26:.*]] = fir.emboxchar %[[VAL_24]], %[[VAL_25]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: fir.call @_QPfoo(%[[VAL_21]], %[[VAL_26]], %[[VAL_2]], %[[VAL_4]]) : (!fir.ref, !fir.boxchar<1>, !fir.ref, !fir.ref>) -> () +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine test_elem_sub(x, c, i, z) + real :: x(:) + character(*) :: c(:) + integer :: i + complex :: z + interface + elemental subroutine foo(x, c, i, z) + real, intent(out) :: x + character(*), intent(inout) :: c + integer, intent(in) :: i + complex, value :: z + end subroutine + end interface + + call foo(x, c(10:1:-1), i, z) +end subroutine + +! 
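+! Note: conceptually, the elemental call above expands to a scalar loop over
+! the conforming elements, with the VALUE argument z copied into a temporary
+! (the {adapt.valuebyref} alloca) once before the loop, roughly
+! (illustration only, z_copy is just a name for that temporary):
+!   do k = 1, size(x)
+!     call foo(x(k), c(11 - k), i, z_copy)   ! c(10:1:-1) maps k to 11-k
+!   end do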
CHECK-LABEL: func @_QPtest_elem_sub_no_array_args( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref{{.*}}, %[[VAL_1:.*]]: !fir.ref{{.*}}) { +subroutine test_elem_sub_no_array_args(i, j) + integer :: i, j + interface + elemental subroutine bar(i, j) + integer, intent(out) :: i + integer, intent(in) :: j + end subroutine + end interface + call bar(i, j) + ! CHECK: fir.call @_QPbar(%[[VAL_0]], %[[VAL_1]]) : (!fir.ref, !fir.ref) -> () +end subroutine diff --git a/flang/test/Lower/array-expression-assumed-size.f90 b/flang/test/Lower/array-expression-assumed-size.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8fc3e94ffaf8074e214b0f4a18c31ea3e9001305 --- /dev/null +++ b/flang/test/Lower/array-expression-assumed-size.f90 @@ -0,0 +1,303 @@ +! RUN: bbc --emit-fir %s -o - | FileCheck %s +! RUN: bbc %s -o - | FileCheck --check-prefix=PostOpt %s + + +subroutine assumed_size_test(a) + integer :: a(10,*) + a(:, 1:2) = a(:, 3:4) +end subroutine assumed_size_test + +subroutine assumed_size_forall_test(b) + integer :: b(10,*) + forall (i=2:6) + b(i, 1:2) = b(i, 3:4) + end forall +end subroutine assumed_size_forall_test + +! CHECK-LABEL: func @_QPassumed_size_test( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>{{.*}}) { +! CHECK: %[[VAL_1A:.*]] = fir.convert %c10{{.*}} : (i64) -> index +! CHECK: %[[VAL_1B:.*]] = arith.cmpi sgt, %[[VAL_1A]], %c0{{.*}} : index +! CHECK: %[[VAL_1:.*]] = arith.select %[[VAL_1B]], %[[VAL_1A]], %c0{{.*}} : index +! CHECK: %[[VAL_2:.*]] = fir.undefined index +! CHECK: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i64) -> index +! CHECK: %[[VAL_6:.*]] = arith.addi %[[VAL_3]], %[[VAL_1]] : index +! CHECK: %[[VAL_7:.*]] = arith.subi %[[VAL_6]], %[[VAL_3]] : index +! CHECK: %[[VAL_8:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_9:.*]] = arith.subi %[[VAL_7]], %[[VAL_3]] : index +! CHECK: %[[VAL_10:.*]] = arith.addi %[[VAL_9]], %[[VAL_5]] : index +! CHECK: %[[VAL_11:.*]] = arith.divsi %[[VAL_10]], %[[VAL_5]] : index +! CHECK: %[[VAL_12:.*]] = arith.cmpi sgt, %[[VAL_11]], %[[VAL_8]] : index +! CHECK: %[[VAL_13:.*]] = arith.select %[[VAL_12]], %[[VAL_11]], %[[VAL_8]] : index +! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i64) -> index +! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i64) -> index +! CHECK: %[[VAL_18:.*]] = arith.constant 2 : i64 +! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i64) -> index +! CHECK: %[[VAL_20:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_19]], %[[VAL_15]] : index +! CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_21]], %[[VAL_17]] : index +! CHECK: %[[VAL_23:.*]] = arith.divsi %[[VAL_22]], %[[VAL_17]] : index +! CHECK: %[[VAL_24:.*]] = arith.cmpi sgt, %[[VAL_23]], %[[VAL_20]] : index +! CHECK: %[[VAL_25:.*]] = arith.select %[[VAL_24]], %[[VAL_23]], %[[VAL_20]] : index +! CHECK: %[[VAL_26:.*]] = fir.shape %[[VAL_1]], %[[VAL_2]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_27:.*]] = fir.slice %[[VAL_3]], %[[VAL_7]], %[[VAL_5]], %[[VAL_15]], %[[VAL_19]], %[[VAL_17]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_28:.*]] = fir.array_load %[[VAL_0]](%[[VAL_26]]) {{\[}}%[[VAL_27]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.array<10x?xi32> +! CHECK: %[[VAL_29:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_30:.*]] = arith.constant 1 : i64 +! 
CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (i64) -> index +! CHECK: %[[VAL_32:.*]] = arith.addi %[[VAL_29]], %[[VAL_1]] : index +! CHECK: %[[VAL_33:.*]] = arith.subi %[[VAL_32]], %[[VAL_29]] : index +! CHECK: %[[VAL_34:.*]] = arith.constant 3 : i64 +! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_34]] : (i64) -> index +! CHECK: %[[VAL_36:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i64) -> index +! CHECK: %[[VAL_38:.*]] = arith.constant 4 : i64 +! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (i64) -> index +! CHECK: %[[VAL_40:.*]] = fir.shape %[[VAL_1]], %[[VAL_2]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_41:.*]] = fir.slice %[[VAL_29]], %[[VAL_33]], %[[VAL_31]], %[[VAL_35]], %[[VAL_39]], %[[VAL_37]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_42:.*]] = fir.array_load %[[VAL_0]](%[[VAL_40]]) {{\[}}%[[VAL_41]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.array<10x?xi32> +! CHECK: %[[VAL_43:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_44:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_13]], %[[VAL_43]] : index +! CHECK: %[[VAL_46:.*]] = arith.subi %[[VAL_25]], %[[VAL_43]] : index +! CHECK: %[[VAL_47:.*]] = fir.do_loop %[[VAL_48:.*]] = %[[VAL_44]] to %[[VAL_46]] step %[[VAL_43]] unordered iter_args(%[[VAL_49:.*]] = %[[VAL_28]]) -> (!fir.array<10x?xi32>) { +! CHECK: %[[VAL_50:.*]] = fir.do_loop %[[VAL_51:.*]] = %[[VAL_44]] to %[[VAL_45]] step %[[VAL_43]] unordered iter_args(%[[VAL_52:.*]] = %[[VAL_49]]) -> (!fir.array<10x?xi32>) { +! CHECK: %[[VAL_53:.*]] = fir.array_fetch %[[VAL_42]], %[[VAL_51]], %[[VAL_48]] : (!fir.array<10x?xi32>, index, index) -> i32 +! CHECK: %[[VAL_54:.*]] = fir.array_update %[[VAL_52]], %[[VAL_53]], %[[VAL_51]], %[[VAL_48]] : (!fir.array<10x?xi32>, i32, index, index) -> !fir.array<10x?xi32> +! CHECK: fir.result %[[VAL_54]] : !fir.array<10x?xi32> +! CHECK: } +! CHECK: fir.result %[[VAL_55:.*]] : !fir.array<10x?xi32> +! CHECK: } +! CHECK: fir.array_merge_store %[[VAL_28]], %[[VAL_56:.*]] to %[[VAL_0]]{{\[}}%[[VAL_27]]] : !fir.array<10x?xi32>, !fir.array<10x?xi32>, !fir.ref>, !fir.slice<2> +! CHECK: return +! CHECK: } + +! CHECK-LABEL: func @_QPassumed_size_forall_test( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>{{.*}}) { +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref, bindc_name = "i"} +! CHECK: %[[VAL_2A:.*]] = fir.convert %c10{{.*}} : (i64) -> index +! CHECK: %[[VAL_2B:.*]] = arith.cmpi sgt, %[[VAL_2A]], %c0{{.*}} : index +! CHECK: %[[VAL_2:.*]] = arith.select %[[VAL_2B]], %[[VAL_2A]], %c0{{.*}} : index +! CHECK: %[[VAL_3:.*]] = fir.undefined index +! CHECK: %[[VAL_4:.*]] = arith.constant 2 : i32 +! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i32) -> index +! CHECK: %[[VAL_6:.*]] = arith.constant 6 : i32 +! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i32) -> index +! CHECK: %[[VAL_8:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_2]], %[[VAL_3]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_10:.*]] = fir.array_load %[[VAL_0]](%[[VAL_9]]) : (!fir.ref>, !fir.shape<2>) -> !fir.array<10x?xi32> +! CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_2]], %[[VAL_3]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_12:.*]] = fir.array_load %[[VAL_0]](%[[VAL_11]]) : (!fir.ref>, !fir.shape<2>) -> !fir.array<10x?xi32> +! CHECK: %[[VAL_13:.*]] = fir.do_loop %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_7]] step %[[VAL_8]] unordered iter_args(%[[VAL_15:.*]] = %[[VAL_10]]) -> (!fir.array<10x?xi32>) { +! 
CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_14]] : (index) -> i32 +! CHECK: fir.store %[[VAL_16]] to %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_17:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i32) -> i64 +! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i64) -> index +! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_17]] : index +! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_22]] : (i64) -> index +! CHECK: %[[VAL_24:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_24]] : (i64) -> index +! CHECK: %[[VAL_26:.*]] = arith.constant 2 : i64 +! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_26]] : (i64) -> index +! CHECK: %[[VAL_28:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_29:.*]] = arith.subi %[[VAL_27]], %[[VAL_23]] : index +! CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_29]], %[[VAL_25]] : index +! CHECK: %[[VAL_31:.*]] = arith.divsi %[[VAL_30]], %[[VAL_25]] : index +! CHECK: %[[VAL_32:.*]] = arith.cmpi sgt, %[[VAL_31]], %[[VAL_28]] : index +! CHECK: %[[VAL_33:.*]] = arith.select %[[VAL_32]], %[[VAL_31]], %[[VAL_28]] : index +! CHECK: %[[VAL_34:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_35]] : (i32) -> i64 +! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i64) -> index +! CHECK: %[[VAL_38:.*]] = arith.subi %[[VAL_37]], %[[VAL_34]] : index +! CHECK: %[[VAL_39:.*]] = arith.constant 3 : i64 +! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (i64) -> index +! CHECK: %[[VAL_41:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_42:.*]] = fir.convert %[[VAL_41]] : (i64) -> index +! CHECK: %[[VAL_43:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_44:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_33]], %[[VAL_43]] : index +! CHECK: %[[VAL_46:.*]] = fir.do_loop %[[VAL_47:.*]] = %[[VAL_44]] to %[[VAL_45]] step %[[VAL_43]] unordered iter_args(%[[VAL_48:.*]] = %[[VAL_15]]) -> (!fir.array<10x?xi32>) { +! CHECK: %[[VAL_49:.*]] = arith.subi %[[VAL_40]], %[[VAL_34]] : index +! CHECK: %[[VAL_50:.*]] = arith.muli %[[VAL_47]], %[[VAL_42]] : index +! CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_49]], %[[VAL_50]] : index +! CHECK: %[[VAL_52:.*]] = fir.array_fetch %[[VAL_12]], %[[VAL_38]], %[[VAL_51]] : (!fir.array<10x?xi32>, index, index) -> i32 +! CHECK: %[[VAL_53:.*]] = arith.subi %[[VAL_23]], %[[VAL_17]] : index +! CHECK: %[[VAL_54:.*]] = arith.muli %[[VAL_47]], %[[VAL_25]] : index +! CHECK: %[[VAL_55:.*]] = arith.addi %[[VAL_53]], %[[VAL_54]] : index +! CHECK: %[[VAL_56:.*]] = fir.array_update %[[VAL_48]], %[[VAL_52]], %[[VAL_21]], %[[VAL_55]] : (!fir.array<10x?xi32>, i32, index, index) -> !fir.array<10x?xi32> +! CHECK: fir.result %[[VAL_56]] : !fir.array<10x?xi32> +! CHECK: } +! CHECK: fir.result %[[VAL_57:.*]] : !fir.array<10x?xi32> +! CHECK: } +! CHECK: fir.array_merge_store %[[VAL_10]], %[[VAL_58:.*]] to %[[VAL_0]] : !fir.array<10x?xi32>, !fir.array<10x?xi32>, !fir.ref> +! CHECK: return +! CHECK: } + +! PostOpt-LABEL: func @_QPassumed_size_test( +! PostOpt-SAME: %[[VAL_0:.*]]: !fir.ref>{{.*}}) { +! PostOpt-DAG: %[[VAL_1:.*]] = arith.constant 10 : index +! PostOpt-DAG: %[[VAL_2:.*]] = arith.constant 1 : index +! PostOpt-DAG: %[[VAL_3:.*]] = arith.constant 2 : index +! PostOpt-DAG: %[[VAL_4:.*]] = arith.constant 0 : index +! PostOpt-DAG: %[[VAL_5:.*]] = arith.constant 3 : index +! 
PostOpt-DAG: %[[VAL_6:.*]] = arith.constant 4 : index +! PostOpt: %[[VAL_7:.*]] = fir.undefined index +! PostOpt: %[[VAL_8:.*]] = fir.shape %[[VAL_1]], %[[VAL_7]] : (index, index) -> !fir.shape<2> +! PostOpt: %[[VAL_9:.*]] = fir.slice %[[VAL_2]], %[[VAL_1]], %[[VAL_2]], %[[VAL_2]], %[[VAL_3]], %[[VAL_2]] : (index, index, index, index, index, index) -> !fir.slice<2> +! PostOpt: %[[VAL_10:.*]] = fir.allocmem !fir.array<10x?xi32>, %[[VAL_3]] +! PostOpt: br ^bb1(%[[VAL_4]], %[[VAL_3]] : index, index) +! PostOpt: ^bb1(%[[VAL_11:.*]]: index, %[[VAL_12:.*]]: index): +! PostOpt: %[[VAL_13:.*]] = arith.cmpi sgt, %[[VAL_12]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_13]], ^bb2(%[[VAL_4]], %[[VAL_1]] : index, index), ^bb5 +! PostOpt: ^bb2(%[[VAL_14:.*]]: index, %[[VAL_15:.*]]: index): +! PostOpt: %[[VAL_16:.*]] = arith.cmpi sgt, %[[VAL_15]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_16]], ^bb3, ^bb4 +! PostOpt: ^bb3: +! PostOpt: %[[VAL_17:.*]] = arith.addi %[[VAL_14]], %[[VAL_2]] : index +! PostOpt: %[[VAL_18:.*]] = arith.addi %[[VAL_11]], %[[VAL_2]] : index +! PostOpt: %[[VAL_19:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_8]]) {{\[}}%[[VAL_9]]] %[[VAL_17]], %[[VAL_18]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_20:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_8]]) %[[VAL_17]], %[[VAL_18]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_21:.*]] = fir.load %[[VAL_19]] : !fir.ref +! PostOpt: fir.store %[[VAL_21]] to %[[VAL_20]] : !fir.ref +! PostOpt: %[[VAL_22:.*]] = arith.subi %[[VAL_15]], %[[VAL_2]] : index +! PostOpt: br ^bb2(%[[VAL_17]], %[[VAL_22]] : index, index) +! PostOpt: ^bb4: +! PostOpt: %[[VAL_23:.*]] = arith.addi %[[VAL_11]], %[[VAL_2]] : index +! PostOpt: %[[VAL_24:.*]] = arith.subi %[[VAL_12]], %[[VAL_2]] : index +! PostOpt: br ^bb1(%[[VAL_23]], %[[VAL_24]] : index, index) +! PostOpt: ^bb5: +! PostOpt: %[[VAL_25:.*]] = fir.slice %[[VAL_2]], %[[VAL_1]], %[[VAL_2]], %[[VAL_5]], %[[VAL_6]], %[[VAL_2]] : (index, index, index, index, index, index) -> !fir.slice<2> +! PostOpt: br ^bb6(%[[VAL_4]], %[[VAL_3]] : index, index) +! PostOpt: ^bb6(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index): +! PostOpt: %[[VAL_28:.*]] = arith.cmpi sgt, %[[VAL_27]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_28]], ^bb7(%[[VAL_4]], %[[VAL_1]] : index, index), ^bb10(%[[VAL_4]], %[[VAL_3]] : index, index) +! PostOpt: ^bb7(%[[VAL_29:.*]]: index, %[[VAL_30:.*]]: index): +! PostOpt: %[[VAL_31:.*]] = arith.cmpi sgt, %[[VAL_30]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_31]], ^bb8, ^bb9 +! PostOpt: ^bb8: +! PostOpt: %[[VAL_32:.*]] = arith.addi %[[VAL_29]], %[[VAL_2]] : index +! PostOpt: %[[VAL_33:.*]] = arith.addi %[[VAL_26]], %[[VAL_2]] : index +! PostOpt: %[[VAL_34:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_8]]) {{\[}}%[[VAL_25]]] %[[VAL_32]], %[[VAL_33]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_35:.*]] = fir.load %[[VAL_34]] : !fir.ref +! PostOpt: %[[VAL_36:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_8]]) %[[VAL_32]], %[[VAL_33]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: fir.store %[[VAL_35]] to %[[VAL_36]] : !fir.ref +! PostOpt: %[[VAL_37:.*]] = arith.subi %[[VAL_30]], %[[VAL_2]] : index +! PostOpt: br ^bb7(%[[VAL_32]], %[[VAL_37]] : index, index) +! PostOpt: ^bb9: +! PostOpt: %[[VAL_38:.*]] = arith.addi %[[VAL_26]], %[[VAL_2]] : index +! PostOpt: %[[VAL_39:.*]] = arith.subi %[[VAL_27]], %[[VAL_2]] : index +! PostOpt: br ^bb6(%[[VAL_38]], %[[VAL_39]] : index, index) +! 
PostOpt: ^bb10(%[[VAL_40:.*]]: index, %[[VAL_41:.*]]: index): +! PostOpt: %[[VAL_42:.*]] = arith.cmpi sgt, %[[VAL_41]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_42]], ^bb11(%[[VAL_4]], %[[VAL_1]] : index, index), ^bb14 +! PostOpt: ^bb11(%[[VAL_43:.*]]: index, %[[VAL_44:.*]]: index): +! PostOpt: %[[VAL_45:.*]] = arith.cmpi sgt, %[[VAL_44]], %[[VAL_4]] : index +! PostOpt: cond_br %[[VAL_45]], ^bb12, ^bb13 +! PostOpt: ^bb12: +! PostOpt: %[[VAL_46:.*]] = arith.addi %[[VAL_43]], %[[VAL_2]] : index +! PostOpt: %[[VAL_47:.*]] = arith.addi %[[VAL_40]], %[[VAL_2]] : index +! PostOpt: %[[VAL_48:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_8]]) %[[VAL_46]], %[[VAL_47]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_49:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_8]]) {{\[}}%[[VAL_9]]] %[[VAL_46]], %[[VAL_47]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_50:.*]] = fir.load %[[VAL_48]] : !fir.ref +! PostOpt: fir.store %[[VAL_50]] to %[[VAL_49]] : !fir.ref +! PostOpt: %[[VAL_51:.*]] = arith.subi %[[VAL_44]], %[[VAL_2]] : index +! PostOpt: br ^bb11(%[[VAL_46]], %[[VAL_51]] : index, index) +! PostOpt: ^bb13: +! PostOpt: %[[VAL_52:.*]] = arith.addi %[[VAL_40]], %[[VAL_2]] : index +! PostOpt: %[[VAL_53:.*]] = arith.subi %[[VAL_41]], %[[VAL_2]] : index +! PostOpt: br ^bb10(%[[VAL_52]], %[[VAL_53]] : index, index) +! PostOpt: ^bb14: +! PostOpt: fir.freemem %[[VAL_10]] : !fir.heap> +! PostOpt: return +! PostOpt: } + +! PostOpt-LABEL: func @_QPassumed_size_forall_test( +! PostOpt-SAME: %[[VAL_0:.*]]: !fir.ref>{{.*}}) { +! PostOpt-DAG: %[[VAL_1:.*]] = arith.constant 3 : index +! PostOpt-DAG: %[[VAL_2:.*]] = arith.constant 10 : index +! PostOpt-DAG: %[[VAL_3:.*]] = arith.constant 2 : index +! PostOpt-DAG: %[[VAL_4:.*]] = arith.constant 1 : index +! PostOpt-DAG: %[[VAL_5:.*]] = arith.constant 0 : index +! PostOpt-DAG: %[[VAL_6:.*]] = arith.constant 5 : index +! PostOpt: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, bindc_name = "i"} +! PostOpt: %[[VAL_8:.*]] = fir.undefined index +! PostOpt: %[[VAL_9:.*]] = fir.shape %[[VAL_2]], %[[VAL_8]] : (index, index) -> !fir.shape<2> +! PostOpt: %[[VAL_10:.*]] = fir.allocmem !fir.array<10x?xi32>, %[[VAL_4]] +! PostOpt: br ^bb1(%[[VAL_5]], %[[VAL_4]] : index, index) +! PostOpt: ^bb1(%[[VAL_11:.*]]: index, %[[VAL_12:.*]]: index): +! PostOpt: %[[VAL_13:.*]] = arith.cmpi sgt, %[[VAL_12]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_13]], ^bb2(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb5 +! PostOpt: ^bb2(%[[VAL_14:.*]]: index, %[[VAL_15:.*]]: index): +! PostOpt: %[[VAL_16:.*]] = arith.cmpi sgt, %[[VAL_15]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_16]], ^bb3, ^bb4 +! PostOpt: ^bb3: +! PostOpt: %[[VAL_17:.*]] = arith.addi %[[VAL_14]], %[[VAL_4]] : index +! PostOpt: %[[VAL_18:.*]] = arith.addi %[[VAL_11]], %[[VAL_4]] : index +! PostOpt: %[[VAL_19:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_9]]) %[[VAL_17]], %[[VAL_18]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_20:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_9]]) %[[VAL_17]], %[[VAL_18]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_21:.*]] = fir.load %[[VAL_19]] : !fir.ref +! PostOpt: fir.store %[[VAL_21]] to %[[VAL_20]] : !fir.ref +! PostOpt: %[[VAL_22:.*]] = arith.subi %[[VAL_15]], %[[VAL_4]] : index +! PostOpt: br ^bb2(%[[VAL_17]], %[[VAL_22]] : index, index) +! PostOpt: ^bb4: +! PostOpt: %[[VAL_23:.*]] = arith.addi %[[VAL_11]], %[[VAL_4]] : index +! 
PostOpt: %[[VAL_24:.*]] = arith.subi %[[VAL_12]], %[[VAL_4]] : index +! PostOpt: br ^bb1(%[[VAL_23]], %[[VAL_24]] : index, index) +! PostOpt: ^bb5: +! PostOpt: br ^bb6(%[[VAL_3]], %[[VAL_6]] : index, index) +! PostOpt: ^bb6(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index): +! PostOpt: %[[VAL_27:.*]] = arith.cmpi sgt, %[[VAL_26]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_27]], ^bb7, ^bb11(%[[VAL_5]], %[[VAL_4]] : index, index) +! PostOpt: ^bb7: +! PostOpt: %[[VAL_28:.*]] = fir.convert %[[VAL_25]] : (index) -> i32 +! PostOpt: fir.store %[[VAL_28]] to %[[VAL_7]] : !fir.ref +! PostOpt: %[[VAL_29:.*]] = fir.load %[[VAL_7]] : !fir.ref +! PostOpt: %[[VAL_30:.*]] = fir.convert %[[VAL_29]] : (i32) -> index +! PostOpt: br ^bb8(%[[VAL_5]], %[[VAL_3]] : index, index) +! PostOpt: ^bb8(%[[VAL_31:.*]]: index, %[[VAL_32:.*]]: index): +! PostOpt: %[[VAL_33:.*]] = arith.cmpi sgt, %[[VAL_32]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_33]], ^bb9, ^bb10 +! PostOpt: ^bb9: +! PostOpt: %[[VAL_34:.*]] = arith.addi %[[VAL_31]], %[[VAL_1]] : index +! PostOpt: %[[VAL_35:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_9]]) %[[VAL_30]], %[[VAL_34]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_36:.*]] = fir.load %[[VAL_35]] : !fir.ref +! PostOpt: %[[VAL_37:.*]] = arith.addi %[[VAL_31]], %[[VAL_4]] : index +! PostOpt: %[[VAL_38:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_9]]) %[[VAL_30]], %[[VAL_37]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: fir.store %[[VAL_36]] to %[[VAL_38]] : !fir.ref +! PostOpt: %[[VAL_39:.*]] = arith.subi %[[VAL_32]], %[[VAL_4]] : index +! PostOpt: br ^bb8(%[[VAL_37]], %[[VAL_39]] : index, index) +! PostOpt: ^bb10: +! PostOpt: %[[VAL_40:.*]] = arith.addi %[[VAL_25]], %[[VAL_4]] : index +! PostOpt: %[[VAL_41:.*]] = arith.subi %[[VAL_26]], %[[VAL_4]] : index +! PostOpt: br ^bb6(%[[VAL_40]], %[[VAL_41]] : index, index) +! PostOpt: ^bb11(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index): +! PostOpt: %[[VAL_44:.*]] = arith.cmpi sgt, %[[VAL_43]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_44]], ^bb12(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb15 +! PostOpt: ^bb12(%[[VAL_45:.*]]: index, %[[VAL_46:.*]]: index): +! PostOpt: %[[VAL_47:.*]] = arith.cmpi sgt, %[[VAL_46]], %[[VAL_5]] : index +! PostOpt: cond_br %[[VAL_47]], ^bb13, ^bb14 +! PostOpt: ^bb13: +! PostOpt: %[[VAL_48:.*]] = arith.addi %[[VAL_45]], %[[VAL_4]] : index +! PostOpt: %[[VAL_49:.*]] = arith.addi %[[VAL_42]], %[[VAL_4]] : index +! PostOpt: %[[VAL_50:.*]] = fir.array_coor %[[VAL_10]](%[[VAL_9]]) %[[VAL_48]], %[[VAL_49]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_51:.*]] = fir.array_coor %[[VAL_0]](%[[VAL_9]]) %[[VAL_48]], %[[VAL_49]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! PostOpt: %[[VAL_52:.*]] = fir.load %[[VAL_50]] : !fir.ref +! PostOpt: fir.store %[[VAL_52]] to %[[VAL_51]] : !fir.ref +! PostOpt: %[[VAL_53:.*]] = arith.subi %[[VAL_46]], %[[VAL_4]] : index +! PostOpt: br ^bb12(%[[VAL_48]], %[[VAL_53]] : index, index) +! PostOpt: ^bb14: +! PostOpt: %[[VAL_54:.*]] = arith.addi %[[VAL_42]], %[[VAL_4]] : index +! PostOpt: %[[VAL_55:.*]] = arith.subi %[[VAL_43]], %[[VAL_4]] : index +! PostOpt: br ^bb11(%[[VAL_54]], %[[VAL_55]] : index, index) +! PostOpt: ^bb15: +! PostOpt: fir.freemem %[[VAL_10]] : !fir.heap> +! PostOpt: return +! 
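+! Note: for an assumed-size dummy such as b(10,*), the callee does not know
+! the extent of the last dimension, which is why the shapes above carry a
+! fir.undefined index in that position; loop bounds come only from the
+! section bounds (1:2, 3:4) and the forall bounds (2:6), never from '*'.
+! Because forall semantics require the right-hand sides to be evaluated
+! before any assignment, lowering goes through the temporary allocated with
+! fir.allocmem above; since these sections do not overlap, the statement
+! behaves like:
+!   do i = 2, 6
+!     b(i, 1:2) = b(i, 3:4)
+!   end do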
PostOpt: } diff --git a/flang/test/Lower/array-expression-slice-1.f90 b/flang/test/Lower/array-expression-slice-1.f90 new file mode 100644 index 0000000000000000000000000000000000000000..30f17de23c04bdece33243b83dca310c247eb201 --- /dev/null +++ b/flang/test/Lower/array-expression-slice-1.f90 @@ -0,0 +1,413 @@ +! RUN: bbc -o - %s | FileCheck %s + +! CHECK-LABEL: func @_QQmain() { +! CHECK-DAG: %[[VAL_0:.*]] = arith.constant 10 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_8:.*]] = arith.constant 8 : i64 +! CHECK-DAG: %[[VAL_11:.*]] = arith.constant 3 : index +! CHECK-DAG: %[[VAL_13:.*]] = arith.constant 2 : i64 +! CHECK-DAG: %[[VAL_14:.*]] = arith.constant 7 : i64 +! CHECK-DAG: %[[VAL_16:.*]] = arith.constant 4 : i64 +! CHECK-DAG: %[[VAL_18:.*]] = arith.constant -1 : i32 +! CHECK-DAG: %[[VAL_19:.*]] = arith.constant 0 : i64 +! CHECK-DAG: %[[VAL_20:.*]] = arith.constant 1 : i64 +! CHECK-DAG: %[[VAL_21:.*]] = arith.constant 3 : i64 +! CHECK-DAG: %[[VAL_22:.*]] = arith.constant 4 : index +! CHECK-DAG: %[[VAL_23:.*]] = arith.constant 1 : i32 +! CHECK-DAG: %[[VAL_24:.*]] = arith.constant 0 : i32 +! CHECK-DAG: %[[VAL_25:.*]] = fir.address_of(@_QFEa1) : !fir.ref> +! CHECK-DAG: %[[VAL_26:.*]] = fir.address_of(@_QFEa2) : !fir.ref> +! CHECK-DAG: %[[VAL_27:.*]] = fir.address_of(@_QFEa3) : !fir.ref> +! CHECK-DAG: %[[VAL_28:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFEi"} +! CHECK-DAG: %[[VAL_29:.*]] = fir.address_of(@_QFEiv) : !fir.ref> +! CHECK-DAG: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "j", uniq_name = "_QFEj"} +! CHECK-DAG: %[[VAL_31:.*]] = fir.alloca i32 {bindc_name = "k", uniq_name = "_QFEk"} +! CHECK: fir.store %[[VAL_24]] to %[[VAL_31]] : !fir.ref +! CHECK: br ^bb1(%[[VAL_5]], %[[VAL_0]] : index, index) +! CHECK: ^bb1(%[[VAL_32:.*]]: index, %[[VAL_33:.*]]: index): +! CHECK: %[[VAL_34:.*]] = arith.cmpi sgt, %[[VAL_33]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_34]], ^bb2, ^bb6 +! CHECK: ^bb2: +! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_32]] : (index) -> i32 +! CHECK: fir.store %[[VAL_35]] to %[[VAL_30]] : !fir.ref +! CHECK: br ^bb3(%[[VAL_5]], %[[VAL_0]] : index, index) +! CHECK: ^bb3(%[[VAL_36:.*]]: index, %[[VAL_37:.*]]: index): +! CHECK: %[[VAL_38:.*]] = arith.cmpi sgt, %[[VAL_37]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_38]], ^bb4, ^bb5 +! CHECK: ^bb4: +! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_36]] : (index) -> i32 +! CHECK: fir.store %[[VAL_39]] to %[[VAL_28]] : !fir.ref +! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_31]] : !fir.ref +! CHECK: %[[VAL_41:.*]] = arith.addi %[[VAL_40]], %[[VAL_23]] : i32 +! CHECK: fir.store %[[VAL_41]] to %[[VAL_31]] : !fir.ref +! CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_31]] : !fir.ref +! CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_42]] : (i32) -> f32 +! CHECK: %[[VAL_44:.*]] = fir.call @fir.cos.f32.f32(%[[VAL_43]]) : (f32) -> f32 +! CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_28]] : !fir.ref +! CHECK: %[[VAL_46:.*]] = fir.convert %[[VAL_45]] : (i32) -> i64 +! CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_46]], %[[VAL_20]] : i64 +! CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_30]] : !fir.ref +! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_48]] : (i32) -> i64 +! CHECK: %[[VAL_50:.*]] = arith.subi %[[VAL_49]], %[[VAL_20]] : i64 +! CHECK: %[[VAL_51:.*]] = fir.coordinate_of %[[VAL_25]], %[[VAL_47]], %[[VAL_50]] : (!fir.ref>, i64, i64) -> !fir.ref +! 
CHECK: fir.store %[[VAL_44]] to %[[VAL_51]] : !fir.ref +! CHECK: %[[VAL_52:.*]] = arith.addi %[[VAL_36]], %[[VAL_5]] : index +! CHECK: %[[VAL_53:.*]] = arith.subi %[[VAL_37]], %[[VAL_5]] : index +! CHECK: br ^bb3(%[[VAL_52]], %[[VAL_53]] : index, index) +! CHECK: ^bb5: +! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_36]] : (index) -> i32 +! CHECK: fir.store %[[VAL_54]] to %[[VAL_28]] : !fir.ref +! CHECK: %[[VAL_55:.*]] = fir.load %[[VAL_31]] : !fir.ref +! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_55]] : (i32) -> f32 +! CHECK: %[[VAL_57:.*]] = fir.call @fir.sin.f32.f32(%[[VAL_56]]) : (f32) -> f32 +! CHECK: %[[VAL_58:.*]] = fir.load %[[VAL_30]] : !fir.ref +! CHECK: %[[VAL_59:.*]] = fir.convert %[[VAL_58]] : (i32) -> i64 +! CHECK: %[[VAL_60:.*]] = arith.subi %[[VAL_59]], %[[VAL_20]] : i64 +! CHECK: %[[VAL_61:.*]] = fir.coordinate_of %[[VAL_27]], %[[VAL_60]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: fir.store %[[VAL_57]] to %[[VAL_61]] : !fir.ref +! CHECK: %[[VAL_62:.*]] = arith.addi %[[VAL_32]], %[[VAL_5]] : index +! CHECK: %[[VAL_63:.*]] = arith.subi %[[VAL_33]], %[[VAL_5]] : index +! CHECK: br ^bb1(%[[VAL_62]], %[[VAL_63]] : index, index) +! CHECK: ^bb6: +! CHECK: %[[VAL_64:.*]] = fir.convert %[[VAL_32]] : (index) -> i32 +! CHECK: fir.store %[[VAL_64]] to %[[VAL_30]] : !fir.ref +! CHECK: %[[VAL_65:.*]] = fir.shape %[[VAL_11]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_66:.*]] = fir.undefined index +! CHECK: %[[VAL_67:.*]] = fir.shape %[[VAL_0]], %[[VAL_0]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_68:.*]] = fir.slice %[[VAL_16]], %[[VAL_66]], %[[VAL_66]], %[[VAL_4]], %[[VAL_0]], %[[VAL_11]] : (i64, index, index, index, index, index) -> !fir.slice<2> +! CHECK: br ^bb7(%[[VAL_6]], %[[VAL_11]] : index, index) +! CHECK: ^bb7(%[[VAL_69:.*]]: index, %[[VAL_70:.*]]: index): +! CHECK: %[[VAL_71:.*]] = arith.cmpi sgt, %[[VAL_70]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_71]], ^bb8, ^bb9 +! CHECK: ^bb8: +! CHECK: %[[VAL_72:.*]] = arith.addi %[[VAL_69]], %[[VAL_5]] : index +! CHECK: %[[VAL_73:.*]] = fir.array_coor %[[VAL_25]](%[[VAL_67]]) {{\[}}%[[VAL_68]]] %[[VAL_22]], %[[VAL_72]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_74:.*]] = fir.load %[[VAL_73]] : !fir.ref +! CHECK: %[[VAL_75:.*]] = fir.array_coor %[[VAL_26]](%[[VAL_65]]) %[[VAL_72]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_74]] to %[[VAL_75]] : !fir.ref +! CHECK: %[[VAL_76:.*]] = arith.subi %[[VAL_70]], %[[VAL_5]] : index +! CHECK: br ^bb7(%[[VAL_72]], %[[VAL_76]] : index, index) +! CHECK: ^bb9: +! CHECK: %[[VAL_77:.*]] = fir.coordinate_of %[[VAL_25]], %[[VAL_21]], %[[VAL_20]] : (!fir.ref>, i64, i64) -> !fir.ref +! CHECK: %[[VAL_78:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_79:.*]] = fir.coordinate_of %[[VAL_26]], %[[VAL_19]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_80:.*]] = fir.load %[[VAL_79]] : !fir.ref +! CHECK: %[[VAL_81:.*]] = arith.cmpf une, %[[VAL_78]], %[[VAL_80]] : f32 +! CHECK: cond_br %[[VAL_81]], ^bb10, ^bb11 +! CHECK: ^bb10: +! CHECK: %[[VAL_82:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_84:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_83]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_85:.*]] = fir.address_of(@_QQcl.6D69736D617463682031) : !fir.ref> +! CHECK: %[[VAL_86:.*]] = fir.convert %[[VAL_85]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_87:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! 
CHECK: %[[VAL_88:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_84]], %[[VAL_86]], %[[VAL_87]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_89:.*]] = fir.load %[[VAL_79]] : !fir.ref +! CHECK: %[[VAL_90:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_84]], %[[VAL_89]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_91:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_92:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_84]], %[[VAL_91]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_93:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_84]]) : (!fir.ref) -> i32 +! CHECK: br ^bb11 +! CHECK: ^bb11: +! CHECK: %[[VAL_94:.*]] = fir.coordinate_of %[[VAL_25]], %[[VAL_21]], %[[VAL_16]] : (!fir.ref>, i64, i64) -> !fir.ref +! CHECK: %[[VAL_95:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_96:.*]] = fir.coordinate_of %[[VAL_26]], %[[VAL_20]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_97:.*]] = fir.load %[[VAL_96]] : !fir.ref +! CHECK: %[[VAL_98:.*]] = arith.cmpf une, %[[VAL_95]], %[[VAL_97]] : f32 +! CHECK: cond_br %[[VAL_98]], ^bb12, ^bb13 +! CHECK: ^bb12: +! CHECK: %[[VAL_99:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_101:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_100]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_102:.*]] = fir.address_of(@_QQcl.6D69736D617463682032) : !fir.ref> +! CHECK: %[[VAL_103:.*]] = fir.convert %[[VAL_102]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_104:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_105:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_101]], %[[VAL_103]], %[[VAL_104]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_106:.*]] = fir.load %[[VAL_96]] : !fir.ref +! CHECK: %[[VAL_107:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_101]], %[[VAL_106]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_108:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_109:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_101]], %[[VAL_108]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_110:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_101]]) : (!fir.ref) -> i32 +! CHECK: br ^bb13 +! CHECK: ^bb13: +! CHECK: %[[VAL_111:.*]] = fir.coordinate_of %[[VAL_25]], %[[VAL_21]], %[[VAL_14]] : (!fir.ref>, i64, i64) -> !fir.ref +! CHECK: %[[VAL_112:.*]] = fir.load %[[VAL_111]] : !fir.ref +! CHECK: %[[VAL_113:.*]] = fir.coordinate_of %[[VAL_26]], %[[VAL_13]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_114:.*]] = fir.load %[[VAL_113]] : !fir.ref +! CHECK: %[[VAL_115:.*]] = arith.cmpf une, %[[VAL_112]], %[[VAL_114]] : f32 +! CHECK: cond_br %[[VAL_115]], ^bb14, ^bb15 +! CHECK: ^bb14: +! CHECK: %[[VAL_116:.*]] = fir.address_of(@_QQcl.{{.*}} : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_118:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_117]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_119:.*]] = fir.address_of(@_QQcl.6D69736D617463682033) : !fir.ref> +! CHECK: %[[VAL_120:.*]] = fir.convert %[[VAL_119]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_121:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_122:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_118]], %[[VAL_120]], %[[VAL_121]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_123:.*]] = fir.load %[[VAL_113]] : !fir.ref +! CHECK: %[[VAL_124:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_118]], %[[VAL_123]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_125:.*]] = fir.load %[[VAL_111]] : !fir.ref +! 
CHECK: %[[VAL_126:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_118]], %[[VAL_125]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_127:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_118]]) : (!fir.ref) -> i32 +! CHECK: br ^bb15 +! CHECK: ^bb15: +! CHECK: %[[VAL_128:.*]] = fir.shape %[[VAL_0]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_129:.*]] = fir.slice %[[VAL_5]], %[[VAL_0]], %[[VAL_22]] : (index, index, index) -> !fir.slice<1> +! CHECK: br ^bb16(%[[VAL_6]], %[[VAL_11]] : index, index) +! CHECK: ^bb16(%[[VAL_130:.*]]: index, %[[VAL_131:.*]]: index): +! CHECK: %[[VAL_132:.*]] = arith.cmpi sgt, %[[VAL_131]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_132]], ^bb17, ^bb18 +! CHECK: ^bb17: +! CHECK: %[[VAL_133:.*]] = arith.addi %[[VAL_130]], %[[VAL_5]] : index +! CHECK: %[[VAL_134:.*]] = fir.array_coor %[[VAL_26]](%[[VAL_65]]) %[[VAL_133]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_135:.*]] = fir.load %[[VAL_134]] : !fir.ref +! CHECK: %[[VAL_136:.*]] = fir.array_coor %[[VAL_27]](%[[VAL_128]]) {{\[}}%[[VAL_129]]] %[[VAL_133]] : (!fir.ref>, !fir.shape<1>, !fir.slice<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_135]] to %[[VAL_136]] : !fir.ref +! CHECK: %[[VAL_137:.*]] = arith.subi %[[VAL_131]], %[[VAL_5]] : index +! CHECK: br ^bb16(%[[VAL_133]], %[[VAL_137]] : index, index) +! CHECK: ^bb18: +! CHECK: %[[VAL_138:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_139:.*]] = fir.coordinate_of %[[VAL_27]], %[[VAL_19]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_140:.*]] = fir.load %[[VAL_139]] : !fir.ref +! CHECK: %[[VAL_141:.*]] = arith.cmpf une, %[[VAL_138]], %[[VAL_140]] : f32 +! CHECK: cond_br %[[VAL_141]], ^bb19, ^bb20 +! CHECK: ^bb19: +! CHECK: %[[VAL_142:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_144:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_143]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_145:.*]] = fir.address_of(@_QQcl.6D69736D617463682034) : !fir.ref> +! CHECK: %[[VAL_146:.*]] = fir.convert %[[VAL_145]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_147:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_148:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_144]], %[[VAL_146]], %[[VAL_147]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_149:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_150:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_144]], %[[VAL_149]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_151:.*]] = fir.load %[[VAL_139]] : !fir.ref +! CHECK: %[[VAL_152:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_144]], %[[VAL_151]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_153:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_144]]) : (!fir.ref) -> i32 +! CHECK: br ^bb20 +! CHECK: ^bb20: +! CHECK: %[[VAL_154:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_155:.*]] = fir.coordinate_of %[[VAL_27]], %[[VAL_16]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_156:.*]] = fir.load %[[VAL_155]] : !fir.ref +! CHECK: %[[VAL_157:.*]] = arith.cmpf une, %[[VAL_154]], %[[VAL_156]] : f32 +! CHECK: cond_br %[[VAL_157]], ^bb21, ^bb22 +! CHECK: ^bb21: +! CHECK: %[[VAL_158:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_160:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_159]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_161:.*]] = fir.address_of(@_QQcl.6D69736D617463682035) : !fir.ref> +! CHECK: %[[VAL_162:.*]] = fir.convert %[[VAL_161]] : (!fir.ref>) -> !fir.ref +! 
CHECK: %[[VAL_163:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_164:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_160]], %[[VAL_162]], %[[VAL_163]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_165:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_166:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_160]], %[[VAL_165]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_167:.*]] = fir.load %[[VAL_155]] : !fir.ref +! CHECK: %[[VAL_168:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_160]], %[[VAL_167]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_169:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_160]]) : (!fir.ref) -> i32 +! CHECK: br ^bb22 +! CHECK: ^bb22: +! CHECK: %[[VAL_170:.*]] = fir.load %[[VAL_111]] : !fir.ref +! CHECK: %[[VAL_171:.*]] = fir.coordinate_of %[[VAL_27]], %[[VAL_8]] : (!fir.ref>, i64) -> !fir.ref +! CHECK: %[[VAL_172:.*]] = fir.load %[[VAL_171]] : !fir.ref +! CHECK: %[[VAL_173:.*]] = arith.cmpf une, %[[VAL_170]], %[[VAL_172]] : f32 +! CHECK: cond_br %[[VAL_173]], ^bb23, ^bb24 +! CHECK: ^bb23: +! CHECK: %[[VAL_174:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_176:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_175]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_177:.*]] = fir.address_of(@_QQcl.6D69736D617463682036) : !fir.ref> +! CHECK: %[[VAL_178:.*]] = fir.convert %[[VAL_177]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_179:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_180:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_176]], %[[VAL_178]], %[[VAL_179]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_181:.*]] = fir.load %[[VAL_111]] : !fir.ref +! CHECK: %[[VAL_182:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_176]], %[[VAL_181]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_183:.*]] = fir.load %[[VAL_171]] : !fir.ref +! CHECK: %[[VAL_184:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_176]], %[[VAL_183]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_185:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_176]]) : (!fir.ref) -> i32 +! CHECK: br ^bb24 +! CHECK: ^bb24: +! CHECK: %[[VAL_186:.*]] = fir.address_of(@_QQro.3xi4.b7f1b733471804c07debf489e49d9c2f) : !fir.ref> +! CHECK: br ^bb25(%[[VAL_6]], %[[VAL_11]] : index, index) +! CHECK: ^bb25(%[[VAL_187:.*]]: index, %[[VAL_188:.*]]: index): +! CHECK: %[[VAL_189:.*]] = arith.cmpi sgt, %[[VAL_188]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_189]], ^bb26, ^bb27 +! CHECK: ^bb26: +! CHECK: %[[VAL_190:.*]] = arith.addi %[[VAL_187]], %[[VAL_5]] : index +! CHECK: %[[VAL_191:.*]] = fir.array_coor %[[VAL_186]](%[[VAL_65]]) %[[VAL_190]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_192:.*]] = fir.load %[[VAL_191]] : !fir.ref +! CHECK: %[[VAL_193:.*]] = fir.array_coor %[[VAL_29]](%[[VAL_65]]) %[[VAL_190]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_192]] to %[[VAL_193]] : !fir.ref +! CHECK: %[[VAL_194:.*]] = arith.subi %[[VAL_188]], %[[VAL_5]] : index +! CHECK: br ^bb25(%[[VAL_190]], %[[VAL_194]] : index, index) +! CHECK: ^bb27: +! CHECK: %[[VAL_195:.*]] = fir.allocmem !fir.array<3xf32> +! CHECK: br ^bb28(%[[VAL_6]], %[[VAL_11]] : index, index) +! CHECK: ^bb28(%[[VAL_196:.*]]: index, %[[VAL_197:.*]]: index): +! CHECK: %[[VAL_198:.*]] = arith.cmpi sgt, %[[VAL_197]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_198]], ^bb29, ^bb30 +! CHECK: ^bb29: +! CHECK: %[[VAL_199:.*]] = arith.addi %[[VAL_196]], %[[VAL_5]] : index +! 
CHECK: %[[VAL_200:.*]] = fir.array_coor %[[VAL_26]](%[[VAL_65]]) %[[VAL_199]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_201:.*]] = fir.array_coor %[[VAL_195]](%[[VAL_65]]) %[[VAL_199]] : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_202:.*]] = fir.load %[[VAL_200]] : !fir.ref +! CHECK: fir.store %[[VAL_202]] to %[[VAL_201]] : !fir.ref +! CHECK: %[[VAL_203:.*]] = arith.subi %[[VAL_197]], %[[VAL_5]] : index +! CHECK: br ^bb28(%[[VAL_199]], %[[VAL_203]] : index, index) +! CHECK: ^bb30(%[[VAL_205:.*]]: index, %[[VAL_206:.*]]: index): +! CHECK: %[[VAL_207:.*]] = arith.cmpi sgt, %[[VAL_206]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_207]], ^bb31, ^bb32(%[[VAL_6]], %[[VAL_11]] : index, index) +! CHECK: ^bb31: +! CHECK: %[[VAL_208:.*]] = arith.addi %[[VAL_205]], %[[VAL_5]] : index +! CHECK: %[[VAL_209:.*]] = fir.array_coor %[[VAL_29]](%[[VAL_65]]) %[[VAL_208]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_210:.*]] = fir.load %[[VAL_209]] : !fir.ref +! CHECK: %[[VAL_211:.*]] = fir.convert %[[VAL_210]] : (i32) -> index +! CHECK: %[[VAL_212:.*]] = fir.array_coor %[[VAL_26]](%[[VAL_65]]) %[[VAL_211]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_213:.*]] = fir.load %[[VAL_212]] : !fir.ref +! CHECK: %[[VAL_214:.*]] = fir.array_coor %[[VAL_195]](%[[VAL_65]]) %[[VAL_208]] : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_213]] to %[[VAL_214]] : !fir.ref +! CHECK: %[[VAL_215:.*]] = arith.subi %[[VAL_206]], %[[VAL_5]] : index +! CHECK: br ^bb30(%[[VAL_208]], %[[VAL_215]] : index, index) +! CHECK: ^bb32(%[[VAL_216:.*]]: index, %[[VAL_217:.*]]: index): +! CHECK: %[[VAL_218:.*]] = arith.cmpi sgt, %[[VAL_217]], %[[VAL_6]] : index +! CHECK: cond_br %[[VAL_218]], ^bb33, ^bb34 +! CHECK: ^bb33: +! CHECK: %[[VAL_219:.*]] = arith.addi %[[VAL_216]], %[[VAL_5]] : index +! CHECK: %[[VAL_220:.*]] = fir.array_coor %[[VAL_195]](%[[VAL_65]]) %[[VAL_219]] : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_221:.*]] = fir.array_coor %[[VAL_26]](%[[VAL_65]]) %[[VAL_219]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_222:.*]] = fir.load %[[VAL_220]] : !fir.ref +! CHECK: fir.store %[[VAL_222]] to %[[VAL_221]] : !fir.ref +! CHECK: %[[VAL_223:.*]] = arith.subi %[[VAL_217]], %[[VAL_5]] : index +! CHECK: br ^bb32(%[[VAL_219]], %[[VAL_223]] : index, index) +! CHECK: ^bb34: +! CHECK: fir.freemem %[[VAL_195]] : !fir.heap> +! CHECK: %[[VAL_224:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_225:.*]] = fir.load %[[VAL_96]] : !fir.ref +! CHECK: %[[VAL_226:.*]] = arith.cmpf une, %[[VAL_224]], %[[VAL_225]] : f32 +! CHECK: cond_br %[[VAL_226]], ^bb35, ^bb36 +! CHECK: ^bb35: +! CHECK: %[[VAL_227:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_229:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_228]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_230:.*]] = fir.address_of(@_QQcl.6D69736D617463682037) : !fir.ref> +! CHECK: %[[VAL_231:.*]] = fir.convert %[[VAL_230]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_232:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_233:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_229]], %[[VAL_231]], %[[VAL_232]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_234:.*]] = fir.load %[[VAL_77]] : !fir.ref +! CHECK: %[[VAL_235:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_229]], %[[VAL_234]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_236:.*]] = fir.load %[[VAL_96]] : !fir.ref +! 
CHECK: %[[VAL_237:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_229]], %[[VAL_236]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_238:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_229]]) : (!fir.ref) -> i32 +! CHECK: br ^bb36 +! CHECK: ^bb36: +! CHECK: %[[VAL_239:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_240:.*]] = fir.load %[[VAL_113]] : !fir.ref +! CHECK: %[[VAL_241:.*]] = arith.cmpf une, %[[VAL_239]], %[[VAL_240]] : f32 +! CHECK: cond_br %[[VAL_241]], ^bb37, ^bb38 +! CHECK: ^bb37: +! CHECK: %[[VAL_242:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_244:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_243]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_245:.*]] = fir.address_of(@_QQcl.6D69736D617463682038) : !fir.ref> +! CHECK: %[[VAL_246:.*]] = fir.convert %[[VAL_245]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_247:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_248:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_244]], %[[VAL_246]], %[[VAL_247]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_249:.*]] = fir.load %[[VAL_94]] : !fir.ref +! CHECK: %[[VAL_250:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_244]], %[[VAL_249]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_251:.*]] = fir.load %[[VAL_113]] : !fir.ref +! CHECK: %[[VAL_252:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_244]], %[[VAL_251]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_253:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_244]]) : (!fir.ref) -> i32 +! CHECK: br ^bb38 +! CHECK: ^bb38: +! CHECK: %[[VAL_254:.*]] = fir.load %[[VAL_111]] : !fir.ref +! CHECK: %[[VAL_255:.*]] = fir.load %[[VAL_79]] : !fir.ref +! CHECK: %[[VAL_256:.*]] = arith.cmpf une, %[[VAL_254]], %[[VAL_255]] : f32 +! CHECK: cond_br %[[VAL_256]], ^bb39, ^bb40 +! CHECK: ^bb39: +! CHECK: %[[VAL_257:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_259:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_18]], %[[VAL_258]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_260:.*]] = fir.address_of(@_QQcl.6D69736D617463682039) : !fir.ref> +! CHECK: %[[VAL_261:.*]] = fir.convert %[[VAL_260]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_262:.*]] = fir.convert %[[VAL_0]] : (index) -> i64 +! CHECK: %[[VAL_263:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_259]], %[[VAL_261]], %[[VAL_262]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_264:.*]] = fir.load %[[VAL_111]] : !fir.ref +! CHECK: %[[VAL_265:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_259]], %[[VAL_264]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_266:.*]] = fir.load %[[VAL_79]] : !fir.ref +! CHECK: %[[VAL_267:.*]] = fir.call @_FortranAioOutputReal32(%[[VAL_259]], %[[VAL_266]]) : (!fir.ref, f32) -> i1 +! CHECK: %[[VAL_268:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_259]]) : (!fir.ref) -> i32 +! CHECK: br ^bb40 +! CHECK: ^bb40: +! CHECK: return +! CHECK: } + +program p + real :: a1(10,10) + real :: a2(3) + real :: a3(10) + integer iv(3) + integer k + + k = 0 + do j = 1, 10 + do i = 1, 10 + k = k + 1 + a1(i,j) = cos(real(k)) + end do + a3(j) = sin(real(k)) + end do + + a2 = a1(4, 2:10:3) + + if (a1(4,2) .ne. a2(1)) print *, "mismatch 1", a2(1), a1(4,2) + if (a1(4,5) .ne. a2(2)) print *, "mismatch 2", a2(2), a1(4,5) + if (a1(4,8) .ne. a2(3)) print *, "mismatch 3", a2(3), a1(4,8) + + a3(1:10:4) = a2 + + if (a1(4,2) .ne. a3(1)) print *, "mismatch 4", a1(4,2), a3(1) + if (a1(4,5) .ne. a3(5)) print *, "mismatch 5", a1(4,5), a3(5) + if (a1(4,8) .ne. 
a3(9)) print *, "mismatch 6", a1(4,8), a3(9) + + iv = (/ 3, 1, 2 /) + + a2 = a2(iv) + + if (a1(4,2) .ne. a2(2)) print *, "mismatch 7", a1(4,2), a2(2) + if (a1(4,5) .ne. a2(3)) print *, "mismatch 8", a1(4,5), a2(3) + if (a1(4,8) .ne. a2(1)) print *, "mismatch 9", a1(4,8), a2(1) + +end program p + +! CHECK-LABEL: func @_QPsub( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxchar<1>{{.*}}) { +! CHECK-DAG: %[[VAL_1:.*]] = arith.constant 5 : index +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 4 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant -1 : i32 +! CHECK-DAG: %[[VAL_7:.*]] = arith.constant 10 : index +! CHECK: %[[VAL_8:.*]]:2 = fir.unboxchar %[[VAL_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]]#0 : (!fir.ref>) -> !fir.ref>> +! CHECK: %[[VAL_10:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref>) -> !fir.ref +! CHECK: %[[VAL_12:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_6]], %[[VAL_11]], %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_13:.*]] = fir.address_of(@_QQcl.61203D20) : !fir.ref> +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_4]] : (index) -> i64 +! CHECK: %[[VAL_16:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_12]], %[[VAL_14]], %[[VAL_15]]) : (!fir.ref, !fir.ref, i64) -> i1 +! CHECK: %[[VAL_17:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_18:.*]] = fir.slice %[[VAL_3]], %[[VAL_1]], %[[VAL_2]] : (index, index, index) -> !fir.slice<1> +! CHECK: %[[VAL_19:.*]] = fir.embox %[[VAL_9]](%[[VAL_17]]) {{\[}}%[[VAL_18]]] : (!fir.ref>>, !fir.shape<1>, !fir.slice<1>) -> !fir.box>> +! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (!fir.box>>) -> !fir.box +! CHECK: %[[VAL_21:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_12]], %[[VAL_20]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_22:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_12]]) : (!fir.ref) -> i32 +! CHECK: return +! CHECK: } + +! Slice operation on array of CHARACTER +subroutine sub(a) + character :: a(10) + print *, "a = ", a(1:5:2) +end subroutine sub diff --git a/flang/test/Lower/array-temp.f90 b/flang/test/Lower/array-temp.f90 new file mode 100644 index 0000000000000000000000000000000000000000..4a58ae5315f44da36c3c5a19b020d7e30165ac27 --- /dev/null +++ b/flang/test/Lower/array-temp.f90 @@ -0,0 +1,413 @@ +! RUN: bbc %s -o - | FileCheck %s + +! CHECK-LABEL: func @_QPss1() +subroutine ss1 + ! CHECK: %[[aa:[0-9]+]] = fir.alloca !fir.array<2650000xf32> {bindc_name = "aa", uniq_name = "_QFss1Eaa"} + ! CHECK: %[[shape:[0-9]+]] = fir.shape {{.*}} : (index) -> !fir.shape<1> + integer, parameter :: N = 2650000 + real aa(N) + ! CHECK: fir.array_coor %[[aa]](%[[shape]]) {{.*}} : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref + aa = -2 + ! CHECK: %[[temp:[0-9]+]] = fir.allocmem !fir.array<2650000xf32> + ! CHECK: fir.array_coor %[[aa]](%[[shape]]) {{.*}} : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref + ! CHECK: fir.array_coor %[[temp]](%[[shape]]) {{.*}} : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref + ! CHECK: fir.array_coor %[[aa]](%[[shape]]) [{{.*}}] {{.*}} : (!fir.ref>, !fir.shape<1>, !fir.slice<1>, index) -> !fir.ref + ! CHECK: fir.array_coor %[[temp]](%[[shape]]) [{{.*}}] {{.*}} : (!fir.heap>, !fir.shape<1>, !fir.slice<1>, index) -> !fir.ref + ! 
CHECK: fir.array_coor %[[temp]](%[[shape]]) {{.*}} : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref + ! CHECK: fir.array_coor %[[aa]](%[[shape]]) {{.*}} : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref + ! CHECK: fir.freemem %[[temp]] : !fir.heap> + aa(2:N) = aa(1:N-1) + 7.0 +! print*, aa(1:2), aa(N-1:N) +end + +subroutine ss2(N) + real aa(N) + aa = -2 + aa(2:N) = aa(1:N-1) + 7.0 + print*, aa(1:2), aa(N-1:N) +end + +subroutine ss3(N) + real aa(2,N) + aa = -2 + aa(:,2:N) = aa(:,1:N-1) + 7.0 + print*, aa(:,1:2), aa(:,N-1:N) +end + +subroutine ss4(N) + real aa(N,2) + aa = -2 + aa(2:N,:) = aa(1:N-1,:) + 7.0 + print*, aa(1:2,:), aa(N-1:N,:) +end + +! CHECK-LABEL: func @_QPss2( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "n"}) { +! CHECK-DAG: %[[VAL_1:.*]] = arith.constant -1 : index +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant -2 : i32 +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : i32 +! CHECK-DAG: %[[VAL_7:.*]] = arith.constant 7.000000e+00 : f32 +! CHECK-DAG: %[[VAL_8:.*]] = arith.constant -1 : i32 +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_11A:.*]] = fir.convert %[[VAL_10]] : (i32) -> index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_11A]], %[[VAL_4]] : index +! CHECK: %[[VAL_11:.*]] = arith.select %[[CMP]], %[[VAL_11A]], %[[VAL_4]] : index +! CHECK: %[[VAL_12:.*]] = fir.alloca !fir.array, %[[VAL_11]] {bindc_name = "aa", uniq_name = "_QFss2Eaa"} +! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_11]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_2]] : (i32) -> f32 +! CHECK: br ^bb1(%[[VAL_4]], %[[VAL_11]] : index, index) +! CHECK: ^bb1(%[[VAL_15:.*]]: index, %[[VAL_16:.*]]: index): +! CHECK: %[[VAL_17:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[VAL_4]] : index +! CHECK: cond_br %[[VAL_17]], ^bb2, ^bb3 +! CHECK: ^bb2: +! CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_15]], %[[VAL_3]] : index +! CHECK: %[[VAL_19:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_18]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_14]] to %[[VAL_19]] : !fir.ref +! CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_16]], %[[VAL_3]] : index +! CHECK: br ^bb1(%[[VAL_18]], %[[VAL_20]] : index, index) +! CHECK: ^bb3: +! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_21]] : (i32) -> index +! CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_22]], %[[VAL_1]] : index +! CHECK: %[[VAL_24:.*]] = arith.cmpi sgt, %[[VAL_23]], %[[VAL_4]] : index +! CHECK: %[[VAL_25:.*]] = arith.select %[[VAL_24]], %[[VAL_23]], %[[VAL_4]] : index +! CHECK: %[[VAL_26:.*]] = fir.slice %[[VAL_5]], %[[VAL_22]], %[[VAL_3]] : (index, index, index) -> !fir.slice<1> +! CHECK: %[[VAL_27:.*]] = fir.allocmem !fir.array, %[[VAL_11]] +! CHECK: br ^bb4(%[[VAL_4]], %[[VAL_11]] : index, index) +! CHECK: ^bb4(%[[VAL_28:.*]]: index, %[[VAL_29:.*]]: index): +! CHECK: %[[VAL_30:.*]] = arith.cmpi sgt, %[[VAL_29]], %[[VAL_4]] : index +! CHECK: cond_br %[[VAL_30]], ^bb5, ^bb6 +! CHECK: ^bb5: +! CHECK: %[[VAL_31:.*]] = arith.addi %[[VAL_28]], %[[VAL_3]] : index +! CHECK: %[[VAL_32:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_31]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_33:.*]] = fir.array_coor %[[VAL_27]](%[[VAL_13]]) %[[VAL_31]] : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_32]] : !fir.ref +! 
CHECK: fir.store %[[VAL_34]] to %[[VAL_33]] : !fir.ref +! CHECK: %[[VAL_35:.*]] = arith.subi %[[VAL_29]], %[[VAL_3]] : index +! CHECK: br ^bb4(%[[VAL_31]], %[[VAL_35]] : index, index) +! CHECK: ^bb6: +! CHECK: %[[VAL_36:.*]] = arith.subi %[[VAL_21]], %[[VAL_6]] : i32 +! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i32) -> index +! CHECK: %[[VAL_38:.*]] = fir.slice %[[VAL_3]], %[[VAL_37]], %[[VAL_3]] : (index, index, index) -> !fir.slice<1> +! CHECK: br ^bb7(%[[VAL_4]], %[[VAL_25]] : index, index) +! CHECK: ^bb7(%[[VAL_39:.*]]: index, %[[VAL_40:.*]]: index): +! CHECK: %[[VAL_41:.*]] = arith.cmpi sgt, %[[VAL_40]], %[[VAL_4]] : index +! CHECK: cond_br %[[VAL_41]], ^bb8, ^bb9(%[[VAL_4]], %[[VAL_11]] : index, index) +! CHECK: ^bb8: +! CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_39]], %[[VAL_3]] : index +! CHECK: %[[VAL_43:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_38]]] %[[VAL_42]] : (!fir.ref>, !fir.shape<1>, !fir.slice<1>, index) -> !fir.ref +! CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_43]] : !fir.ref +! CHECK: %[[VAL_45:.*]] = arith.addf %[[VAL_44]], %[[VAL_7]] : f32 +! CHECK: %[[VAL_46:.*]] = fir.array_coor %[[VAL_27]](%[[VAL_13]]) {{\[}}%[[VAL_26]]] %[[VAL_42]] : (!fir.heap>, !fir.shape<1>, !fir.slice<1>, index) -> !fir.ref +! CHECK: fir.store %[[VAL_45]] to %[[VAL_46]] : !fir.ref +! CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_40]], %[[VAL_3]] : index +! CHECK: br ^bb7(%[[VAL_42]], %[[VAL_47]] : index, index) +! CHECK: ^bb9(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index): +! CHECK: %[[VAL_50:.*]] = arith.cmpi sgt, %[[VAL_49]], %[[VAL_4]] : index +! CHECK: cond_br %[[VAL_50]], ^bb10, ^bb11 +! CHECK: ^bb10: +! CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_48]], %[[VAL_3]] : index +! CHECK: %[[VAL_52:.*]] = fir.array_coor %[[VAL_27]](%[[VAL_13]]) %[[VAL_51]] : (!fir.heap>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_53:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_51]] : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref +! CHECK: %[[VAL_54:.*]] = fir.load %[[VAL_52]] : !fir.ref +! CHECK: fir.store %[[VAL_54]] to %[[VAL_53]] : !fir.ref +! CHECK: %[[VAL_55:.*]] = arith.subi %[[VAL_49]], %[[VAL_3]] : index +! CHECK: br ^bb9(%[[VAL_51]], %[[VAL_55]] : index, index) +! CHECK: ^bb11: +! CHECK: fir.freemem %[[VAL_27]] : !fir.heap> +! CHECK: %[[VAL_58:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_8]], %{{.*}}, %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_59:.*]] = fir.slice %[[VAL_3]], %[[VAL_5]], %[[VAL_3]] : (index, index, index) -> !fir.slice<1> +! CHECK: %[[VAL_60:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_59]]] : (!fir.ref>, !fir.shape<1>, !fir.slice<1>) -> !fir.box> +! CHECK: %[[VAL_61:.*]] = fir.convert %[[VAL_60]] : (!fir.box>) -> !fir.box +! CHECK: %[[VAL_62:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_58]], %[[VAL_61]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_63:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_64:.*]] = arith.subi %[[VAL_63]], %[[VAL_6]] : i32 +! CHECK: %[[VAL_65:.*]] = fir.convert %[[VAL_64]] : (i32) -> index +! CHECK: %[[VAL_66:.*]] = fir.convert %[[VAL_63]] : (i32) -> index +! CHECK: %[[VAL_67:.*]] = fir.slice %[[VAL_65]], %[[VAL_66]], %[[VAL_3]] : (index, index, index) -> !fir.slice<1> +! CHECK: %[[VAL_68:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_67]]] : (!fir.ref>, !fir.shape<1>, !fir.slice<1>) -> !fir.box> +! CHECK: %[[VAL_69:.*]] = fir.convert %[[VAL_68]] : (!fir.box>) -> !fir.box +! 
CHECK: %[[VAL_70:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_58]], %[[VAL_69]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_71:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_58]]) : (!fir.ref) -> i32 +! CHECK: return +! CHECK: } + +! CHECK-LABEL: func @_QPss3( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "n"}) { +! CHECK-DAG: %[[VAL_1:.*]] = arith.constant -1 : index +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant -2 : i32 +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : i32 +! CHECK-DAG: %[[VAL_7:.*]] = arith.constant 7.000000e+00 : f32 +! CHECK-DAG: %[[VAL_8:.*]] = arith.constant -1 : i32 +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_11A:.*]] = fir.convert %[[VAL_10]] : (i32) -> index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_11A]], %[[VAL_5]] : index +! CHECK: %[[VAL_11:.*]] = arith.select %[[CMP]], %[[VAL_11A]], %[[VAL_5]] : index +! CHECK: %[[VAL_12:.*]] = fir.alloca !fir.array<2x?xf32>, %[[VAL_11]] {bindc_name = "aa", uniq_name = "_QFss3Eaa"} +! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_2]], %[[VAL_11]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_4]] : (i32) -> f32 +! CHECK: br ^bb1(%[[VAL_5]], %[[VAL_11]] : index, index) +! CHECK: ^bb1(%[[VAL_15:.*]]: index, %[[VAL_16:.*]]: index): +! CHECK: %[[VAL_17:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_17]], ^bb2(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb5 +! CHECK: ^bb2(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index): +! CHECK: %[[VAL_20:.*]] = arith.cmpi sgt, %[[VAL_19]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_20]], ^bb3, ^bb4 +! CHECK: ^bb3: +! CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_18]], %[[VAL_3]] : index +! CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_15]], %[[VAL_3]] : index +! CHECK: %[[VAL_23:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_21]], %[[VAL_22]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: fir.store %[[VAL_14]] to %[[VAL_23]] : !fir.ref +! CHECK: %[[VAL_24:.*]] = arith.subi %[[VAL_19]], %[[VAL_3]] : index +! CHECK: br ^bb2(%[[VAL_21]], %[[VAL_24]] : index, index) +! CHECK: ^bb4: +! CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_15]], %[[VAL_3]] : index +! CHECK: %[[VAL_26:.*]] = arith.subi %[[VAL_16]], %[[VAL_3]] : index +! CHECK: br ^bb1(%[[VAL_25]], %[[VAL_26]] : index, index) +! CHECK: ^bb5: +! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_27]] : (i32) -> index +! CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_28]], %[[VAL_1]] : index +! CHECK: %[[VAL_30:.*]] = arith.cmpi sgt, %[[VAL_29]], %[[VAL_5]] : index +! CHECK: %[[VAL_31:.*]] = arith.select %[[VAL_30]], %[[VAL_29]], %[[VAL_5]] : index +! CHECK: %[[VAL_32:.*]] = fir.slice %[[VAL_3]], %[[VAL_2]], %[[VAL_3]], %[[VAL_2]], %[[VAL_28]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_33:.*]] = fir.allocmem !fir.array<2x?xf32>, %[[VAL_11]] +! CHECK: br ^bb6(%[[VAL_5]], %[[VAL_11]] : index, index) +! CHECK: ^bb6(%[[VAL_34:.*]]: index, %[[VAL_35:.*]]: index): +! CHECK: %[[VAL_36:.*]] = arith.cmpi sgt, %[[VAL_35]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_36]], ^bb7(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb10 +! CHECK: ^bb7(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index): +! CHECK: %[[VAL_39:.*]] = arith.cmpi sgt, %[[VAL_38]], %[[VAL_5]] : index +! 
CHECK: cond_br %[[VAL_39]], ^bb8, ^bb9 +! CHECK: ^bb8: +! CHECK: %[[VAL_40:.*]] = arith.addi %[[VAL_37]], %[[VAL_3]] : index +! CHECK: %[[VAL_41:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index +! CHECK: %[[VAL_42:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_40]], %[[VAL_41]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_43:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) %[[VAL_40]], %[[VAL_41]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_42]] : !fir.ref +! CHECK: fir.store %[[VAL_44]] to %[[VAL_43]] : !fir.ref +! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_38]], %[[VAL_3]] : index +! CHECK: br ^bb7(%[[VAL_40]], %[[VAL_45]] : index, index) +! CHECK: ^bb9: +! CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index +! CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_35]], %[[VAL_3]] : index +! CHECK: br ^bb6(%[[VAL_46]], %[[VAL_47]] : index, index) +! CHECK: ^bb10: +! CHECK: %[[VAL_48:.*]] = arith.subi %[[VAL_27]], %[[VAL_6]] : i32 +! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_48]] : (i32) -> index +! CHECK: %[[VAL_50:.*]] = fir.slice %[[VAL_3]], %[[VAL_2]], %[[VAL_3]], %[[VAL_3]], %[[VAL_49]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: br ^bb11(%[[VAL_5]], %[[VAL_31]] : index, index) +! CHECK: ^bb11(%[[VAL_51:.*]]: index, %[[VAL_52:.*]]: index): +! CHECK: %[[VAL_53:.*]] = arith.cmpi sgt, %[[VAL_52]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_53]], ^bb12(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb15(%[[VAL_5]], %[[VAL_11]] : index, index) +! CHECK: ^bb12(%[[VAL_54:.*]]: index, %[[VAL_55:.*]]: index): +! CHECK: %[[VAL_56:.*]] = arith.cmpi sgt, %[[VAL_55]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_56]], ^bb13, ^bb14 +! CHECK: ^bb13: +! CHECK: %[[VAL_57:.*]] = arith.addi %[[VAL_54]], %[[VAL_3]] : index +! CHECK: %[[VAL_58:.*]] = arith.addi %[[VAL_51]], %[[VAL_3]] : index +! CHECK: %[[VAL_59:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_50]]] %[[VAL_57]], %[[VAL_58]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_60:.*]] = fir.load %[[VAL_59]] : !fir.ref +! CHECK: %[[VAL_61:.*]] = arith.addf %[[VAL_60]], %[[VAL_7]] : f32 +! CHECK: %[[VAL_62:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) {{\[}}%[[VAL_32]]] %[[VAL_57]], %[[VAL_58]] : (!fir.heap>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! CHECK: fir.store %[[VAL_61]] to %[[VAL_62]] : !fir.ref +! CHECK: %[[VAL_63:.*]] = arith.subi %[[VAL_55]], %[[VAL_3]] : index +! CHECK: br ^bb12(%[[VAL_57]], %[[VAL_63]] : index, index) +! CHECK: ^bb14: +! CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_51]], %[[VAL_3]] : index +! CHECK: %[[VAL_65:.*]] = arith.subi %[[VAL_52]], %[[VAL_3]] : index +! CHECK: br ^bb11(%[[VAL_64]], %[[VAL_65]] : index, index) +! CHECK: ^bb15(%[[VAL_66:.*]]: index, %[[VAL_67:.*]]: index): +! CHECK: %[[VAL_68:.*]] = arith.cmpi sgt, %[[VAL_67]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_68]], ^bb16(%[[VAL_5]], %[[VAL_2]] : index, index), ^bb19 +! CHECK: ^bb16(%[[VAL_69:.*]]: index, %[[VAL_70:.*]]: index): +! CHECK: %[[VAL_71:.*]] = arith.cmpi sgt, %[[VAL_70]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_71]], ^bb17, ^bb18 +! CHECK: ^bb17: +! CHECK: %[[VAL_72:.*]] = arith.addi %[[VAL_69]], %[[VAL_3]] : index +! CHECK: %[[VAL_73:.*]] = arith.addi %[[VAL_66]], %[[VAL_3]] : index +! CHECK: %[[VAL_74:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) %[[VAL_72]], %[[VAL_73]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! 
CHECK: %[[VAL_75:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_72]], %[[VAL_73]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_76:.*]] = fir.load %[[VAL_74]] : !fir.ref +! CHECK: fir.store %[[VAL_76]] to %[[VAL_75]] : !fir.ref +! CHECK: %[[VAL_77:.*]] = arith.subi %[[VAL_70]], %[[VAL_3]] : index +! CHECK: br ^bb16(%[[VAL_72]], %[[VAL_77]] : index, index) +! CHECK: ^bb18: +! CHECK: %[[VAL_78:.*]] = arith.addi %[[VAL_66]], %[[VAL_3]] : index +! CHECK: %[[VAL_79:.*]] = arith.subi %[[VAL_67]], %[[VAL_3]] : index +! CHECK: br ^bb15(%[[VAL_78]], %[[VAL_79]] : index, index) +! CHECK: ^bb19: +! CHECK: fir.freemem %[[VAL_33]] : !fir.heap> +! CHECK: %[[VAL_82:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_8]], %{{.*}}, %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_83:.*]] = fir.slice %[[VAL_3]], %[[VAL_2]], %[[VAL_3]], %[[VAL_3]], %[[VAL_2]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_84:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_83]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.box> +! CHECK: %[[VAL_85:.*]] = fir.convert %[[VAL_84]] : (!fir.box>) -> !fir.box +! CHECK: %[[VAL_86:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_82]], %[[VAL_85]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_87:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_88:.*]] = arith.subi %[[VAL_87]], %[[VAL_6]] : i32 +! CHECK: %[[VAL_89:.*]] = fir.convert %[[VAL_88]] : (i32) -> index +! CHECK: %[[VAL_90:.*]] = fir.convert %[[VAL_87]] : (i32) -> index +! CHECK: %[[VAL_91:.*]] = fir.slice %[[VAL_3]], %[[VAL_2]], %[[VAL_3]], %[[VAL_89]], %[[VAL_90]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_92:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_91]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.box> +! CHECK: %[[VAL_93:.*]] = fir.convert %[[VAL_92]] : (!fir.box>) -> !fir.box +! CHECK: %[[VAL_94:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_82]], %[[VAL_93]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_95:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_82]]) : (!fir.ref) -> i32 +! CHECK: return +! CHECK: } + +! CHECK-LABEL: func @_QPss4( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "n"}) { +! CHECK-DAG: %[[VAL_1:.*]] = arith.constant -1 : index +! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index +! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index +! CHECK-DAG: %[[VAL_4:.*]] = arith.constant -2 : i32 +! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index +! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : i32 +! CHECK-DAG: %[[VAL_7:.*]] = arith.constant 7.000000e+00 : f32 +! CHECK-DAG: %[[VAL_8:.*]] = arith.constant -1 : i32 +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_11A:.*]] = fir.convert %[[VAL_10]] : (i32) -> index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_11A]], %[[VAL_5]] : index +! CHECK: %[[VAL_11:.*]] = arith.select %[[CMP]], %[[VAL_11A]], %[[VAL_5]] : index +! CHECK: %[[VAL_12:.*]] = fir.alloca !fir.array, %[[VAL_11]] {bindc_name = "aa", uniq_name = "_QFss4Eaa"} +! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_11]], %[[VAL_2]] : (index, index) -> !fir.shape<2> +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_4]] : (i32) -> f32 +! CHECK: br ^bb1(%[[VAL_5]], %[[VAL_2]] : index, index) +! CHECK: ^bb1(%[[VAL_15:.*]]: index, %[[VAL_16:.*]]: index): +! CHECK: %[[VAL_17:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[VAL_5]] : index +! 
CHECK: cond_br %[[VAL_17]], ^bb2(%[[VAL_5]], %[[VAL_11]] : index, index), ^bb5 +! CHECK: ^bb2(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index): +! CHECK: %[[VAL_20:.*]] = arith.cmpi sgt, %[[VAL_19]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_20]], ^bb3, ^bb4 +! CHECK: ^bb3: +! CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_18]], %[[VAL_3]] : index +! CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_15]], %[[VAL_3]] : index +! CHECK: %[[VAL_23:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_21]], %[[VAL_22]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: fir.store %[[VAL_14]] to %[[VAL_23]] : !fir.ref +! CHECK: %[[VAL_24:.*]] = arith.subi %[[VAL_19]], %[[VAL_3]] : index +! CHECK: br ^bb2(%[[VAL_21]], %[[VAL_24]] : index, index) +! CHECK: ^bb4: +! CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_15]], %[[VAL_3]] : index +! CHECK: %[[VAL_26:.*]] = arith.subi %[[VAL_16]], %[[VAL_3]] : index +! CHECK: br ^bb1(%[[VAL_25]], %[[VAL_26]] : index, index) +! CHECK: ^bb5: +! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_27]] : (i32) -> index +! CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_28]], %[[VAL_1]] : index +! CHECK: %[[VAL_30:.*]] = arith.cmpi sgt, %[[VAL_29]], %[[VAL_5]] : index +! CHECK: %[[VAL_31:.*]] = arith.select %[[VAL_30]], %[[VAL_29]], %[[VAL_5]] : index +! CHECK: %[[VAL_32:.*]] = fir.slice %[[VAL_2]], %[[VAL_28]], %[[VAL_3]], %[[VAL_3]], %[[VAL_2]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_33:.*]] = fir.allocmem !fir.array, %[[VAL_11]] +! CHECK: br ^bb6(%[[VAL_5]], %[[VAL_2]] : index, index) +! CHECK: ^bb6(%[[VAL_34:.*]]: index, %[[VAL_35:.*]]: index): +! CHECK: %[[VAL_36:.*]] = arith.cmpi sgt, %[[VAL_35]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_36]], ^bb7(%[[VAL_5]], %[[VAL_11]] : index, index), ^bb10 +! CHECK: ^bb7(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index): +! CHECK: %[[VAL_39:.*]] = arith.cmpi sgt, %[[VAL_38]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_39]], ^bb8, ^bb9 +! CHECK: ^bb8: +! CHECK: %[[VAL_40:.*]] = arith.addi %[[VAL_37]], %[[VAL_3]] : index +! CHECK: %[[VAL_41:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index +! CHECK: %[[VAL_42:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_40]], %[[VAL_41]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_43:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) %[[VAL_40]], %[[VAL_41]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_42]] : !fir.ref +! CHECK: fir.store %[[VAL_44]] to %[[VAL_43]] : !fir.ref +! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_38]], %[[VAL_3]] : index +! CHECK: br ^bb7(%[[VAL_40]], %[[VAL_45]] : index, index) +! CHECK: ^bb9: +! CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index +! CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_35]], %[[VAL_3]] : index +! CHECK: br ^bb6(%[[VAL_46]], %[[VAL_47]] : index, index) +! CHECK: ^bb10: +! CHECK: %[[VAL_48:.*]] = arith.subi %[[VAL_27]], %[[VAL_6]] : i32 +! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_48]] : (i32) -> index +! CHECK: %[[VAL_50:.*]] = fir.slice %[[VAL_3]], %[[VAL_49]], %[[VAL_3]], %[[VAL_3]], %[[VAL_2]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: br ^bb11(%[[VAL_5]], %[[VAL_2]] : index, index) +! CHECK: ^bb11(%[[VAL_51:.*]]: index, %[[VAL_52:.*]]: index): +! CHECK: %[[VAL_53:.*]] = arith.cmpi sgt, %[[VAL_52]], %[[VAL_5]] : index +! 
CHECK: cond_br %[[VAL_53]], ^bb12(%[[VAL_5]], %[[VAL_31]] : index, index), ^bb15(%[[VAL_5]], %[[VAL_2]] : index, index) +! CHECK: ^bb12(%[[VAL_54:.*]]: index, %[[VAL_55:.*]]: index): +! CHECK: %[[VAL_56:.*]] = arith.cmpi sgt, %[[VAL_55]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_56]], ^bb13, ^bb14 +! CHECK: ^bb13: +! CHECK: %[[VAL_57:.*]] = arith.addi %[[VAL_54]], %[[VAL_3]] : index +! CHECK: %[[VAL_58:.*]] = arith.addi %[[VAL_51]], %[[VAL_3]] : index +! CHECK: %[[VAL_59:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_50]]] %[[VAL_57]], %[[VAL_58]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_60:.*]] = fir.load %[[VAL_59]] : !fir.ref +! CHECK: %[[VAL_61:.*]] = arith.addf %[[VAL_60]], %[[VAL_7]] : f32 +! CHECK: %[[VAL_62:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) {{\[}}%[[VAL_32]]] %[[VAL_57]], %[[VAL_58]] : (!fir.heap>, !fir.shape<2>, !fir.slice<2>, index, index) -> !fir.ref +! CHECK: fir.store %[[VAL_61]] to %[[VAL_62]] : !fir.ref +! CHECK: %[[VAL_63:.*]] = arith.subi %[[VAL_55]], %[[VAL_3]] : index +! CHECK: br ^bb12(%[[VAL_57]], %[[VAL_63]] : index, index) +! CHECK: ^bb14: +! CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_51]], %[[VAL_3]] : index +! CHECK: %[[VAL_65:.*]] = arith.subi %[[VAL_52]], %[[VAL_3]] : index +! CHECK: br ^bb11(%[[VAL_64]], %[[VAL_65]] : index, index) +! CHECK: ^bb15(%[[VAL_66:.*]]: index, %[[VAL_67:.*]]: index): +! CHECK: %[[VAL_68:.*]] = arith.cmpi sgt, %[[VAL_67]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_68]], ^bb16(%[[VAL_5]], %[[VAL_11]] : index, index), ^bb19 +! CHECK: ^bb16(%[[VAL_69:.*]]: index, %[[VAL_70:.*]]: index): +! CHECK: %[[VAL_71:.*]] = arith.cmpi sgt, %[[VAL_70]], %[[VAL_5]] : index +! CHECK: cond_br %[[VAL_71]], ^bb17, ^bb18 +! CHECK: ^bb17: +! CHECK: %[[VAL_72:.*]] = arith.addi %[[VAL_69]], %[[VAL_3]] : index +! CHECK: %[[VAL_73:.*]] = arith.addi %[[VAL_66]], %[[VAL_3]] : index +! CHECK: %[[VAL_74:.*]] = fir.array_coor %[[VAL_33]](%[[VAL_13]]) %[[VAL_72]], %[[VAL_73]] : (!fir.heap>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_75:.*]] = fir.array_coor %[[VAL_12]](%[[VAL_13]]) %[[VAL_72]], %[[VAL_73]] : (!fir.ref>, !fir.shape<2>, index, index) -> !fir.ref +! CHECK: %[[VAL_76:.*]] = fir.load %[[VAL_74]] : !fir.ref +! CHECK: fir.store %[[VAL_76]] to %[[VAL_75]] : !fir.ref +! CHECK: %[[VAL_77:.*]] = arith.subi %[[VAL_70]], %[[VAL_3]] : index +! CHECK: br ^bb16(%[[VAL_72]], %[[VAL_77]] : index, index) +! CHECK: ^bb18: +! CHECK: %[[VAL_78:.*]] = arith.addi %[[VAL_66]], %[[VAL_3]] : index +! CHECK: %[[VAL_79:.*]] = arith.subi %[[VAL_67]], %[[VAL_3]] : index +! CHECK: br ^bb15(%[[VAL_78]], %[[VAL_79]] : index, index) +! CHECK: ^bb19: +! CHECK: fir.freemem %[[VAL_33]] : !fir.heap> +! CHECK: %[[VAL_82:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_8]], %{{.*}}, %{{.*}}) : (i32, !fir.ref, i32) -> !fir.ref +! CHECK: %[[VAL_83:.*]] = fir.slice %[[VAL_3]], %[[VAL_2]], %[[VAL_3]], %[[VAL_3]], %[[VAL_2]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_84:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_83]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.box> +! CHECK: %[[VAL_85:.*]] = fir.convert %[[VAL_84]] : (!fir.box>) -> !fir.box +! CHECK: %[[VAL_86:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_82]], %[[VAL_85]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_87:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_88:.*]] = arith.subi %[[VAL_87]], %[[VAL_6]] : i32 +! 
CHECK: %[[VAL_89:.*]] = fir.convert %[[VAL_88]] : (i32) -> index +! CHECK: %[[VAL_90:.*]] = fir.convert %[[VAL_87]] : (i32) -> index +! CHECK: %[[VAL_91:.*]] = fir.slice %[[VAL_89]], %[[VAL_90]], %[[VAL_3]], %[[VAL_3]], %[[VAL_2]], %[[VAL_3]] : (index, index, index, index, index, index) -> !fir.slice<2> +! CHECK: %[[VAL_92:.*]] = fir.embox %[[VAL_12]](%[[VAL_13]]) {{\[}}%[[VAL_91]]] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.box> +! CHECK: %[[VAL_93:.*]] = fir.convert %[[VAL_92]] : (!fir.box>) -> !fir.box +! CHECK: %[[VAL_94:.*]] = fir.call @_FortranAioOutputDescriptor(%[[VAL_82]], %[[VAL_93]]) : (!fir.ref, !fir.box) -> i1 +! CHECK: %[[VAL_95:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_82]]) : (!fir.ref) -> i32 +! CHECK: return +! CHECK: } + + +! CHECK-LABEL: func @_QPtt1 +subroutine tt1 + ! CHECK: fir.call @_FortranAioBeginExternalListOutput + ! CHECK: %[[temp3:[0-9]+]] = fir.allocmem !fir.array<3xf32> + ! CHECK: br ^bb1(%[[temp3]] + ! CHECK-NEXT: ^bb1(%[[temp3arg:[0-9]+]]: !fir.heap> + ! CHECK: %[[temp1:[0-9]+]] = fir.allocmem !fir.array<1xf32> + ! CHECK: fir.call @_QFtt1Pr + ! CHECK: fir.call @realloc + ! CHECK: fir.freemem %[[temp1]] : !fir.heap> + ! CHECK: %[[temp3x:[0-9]+]] = fir.allocmem !fir.array<3xf32> + ! CHECK: fir.call @_FortranAioOutputDescriptor + ! CHECK-NEXT: fir.freemem %[[temp3x]] : !fir.heap> + ! CHECK-NEXT: fir.freemem %[[temp3arg]] : !fir.heap> + ! CHECK-NEXT: fir.call @_FortranAioEndIoStatement + print*, [(r([7.0]),i=1,3)] +contains + ! CHECK-LABEL: func @_QFtt1Pr + function r(x) + real x(:) + r = x(1) + end +end diff --git a/flang/test/Lower/call-by-value-attr.f90 b/flang/test/Lower/call-by-value-attr.f90 index d7356116f8a282ee350f919573e492912293e48d..b94f173f6b72ddd773fac536781b9bb268fe655c 100644 --- a/flang/test/Lower/call-by-value-attr.f90 +++ b/flang/test/Lower/call-by-value-attr.f90 @@ -80,3 +80,49 @@ program call_by_value_attr !CHECK: fir.call @_QPsubra(%[[CONVERT_B]]) call subra(b(5:15)) end program call_by_value_attr + + +! CHECK-LABEL: func @_QPtest_litteral_copies_1 +subroutine test_litteral_copies_1 + ! VALUE arguments can be modified by the callee, so the static storage of + ! literal constants and named parameters must not be passed directly to them. + interface + subroutine takes_array_value(v) + integer, value :: v(4) + end subroutine + end interface + integer, parameter :: p(100) = 42 + ! CHECK: %[[VAL_0:.*]] = arith.constant 100 : index + ! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref> + ! CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<100xi32> + ! CHECK: fir.do_loop % + ! CHECK: } + ! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_5]] : !fir.array<100xi32>, !fir.array<100xi32>, !fir.heap> + ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_5]] : (!fir.heap>) -> !fir.ref> + ! CHECK: fir.call @_QPtakes_array_value(%[[VAL_17]]) : (!fir.ref>) -> () + call takes_array_value(p) + ! CHECK: fir.freemem %[[VAL_5]] : !fir.heap> +end subroutine + +! CHECK-LABEL: func @_QPtest_litteral_copies_2 +subroutine test_litteral_copies_2 + interface + subroutine takes_char_value(v) + character(*), value :: v + end subroutine + end interface + ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref> + ! CHECK: %[[VAL_1:.*]] = arith.constant 71 : index + ! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.char<1,71> {bindc_name = ".chrtmp"} + ! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i64 + ! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_1]] : (index) -> i64 + ! CHECK: %[[VAL_5:.*]] = arith.muli %[[VAL_3]], %[[VAL_4]] : i64 + ! 
CHECK: %[[VAL_6:.*]] = arith.constant false + ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.ref + ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>) -> !fir.ref + ! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_7]], %[[VAL_8]], %[[VAL_5]], %[[VAL_6]]) : (!fir.ref, !fir.ref, i64, i1) -> () + ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.ref> + ! CHECK: %[[VAL_10:.*]] = fir.emboxchar %[[VAL_9]], %[[VAL_1]] : (!fir.ref>, index) -> !fir.boxchar<1> + ! CHECK: fir.call @_QPtakes_char_value(%[[VAL_10]]) : (!fir.boxchar<1>) -> () + call takes_char_value("a character string literal that could be locally modified by the callee") +end subroutine diff --git a/flang/test/Lower/forall/scalar-substring.f90 b/flang/test/Lower/forall/scalar-substring.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bbbc6a56358a1c1c741f2c37daeff12fdaaebed8 --- /dev/null +++ b/flang/test/Lower/forall/scalar-substring.f90 @@ -0,0 +1,81 @@ +! RUN: bbc -emit-fir %s -o - | FileCheck %s + +subroutine s(ch) + character(10) :: ch + forall (i=1:4) + ch(i:i) = ch(i+1:i+1) + end forall +end subroutine s + +! CHECK-LABEL: func @_QPs( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxchar<1> {fir.bindc_name = "ch"}) { +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref, bindc_name = "i"} +! CHECK: %[[VAL_2:.*]]:2 = fir.unboxchar %[[VAL_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 +! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i32) -> index +! CHECK: %[[VAL_5:.*]] = arith.constant 4 : i32 +! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i32) -> index +! CHECK: %[[VAL_7:.*]] = arith.constant 1 : index +! CHECK: fir.do_loop %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_6]] step %[[VAL_7]] unordered { +! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (index) -> i32 +! CHECK: fir.store %[[VAL_9]] to %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 +! CHECK: %[[VAL_12:.*]] = arith.addi %[[VAL_10]], %[[VAL_11]] : i32 +! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i32) -> i64 +! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 +! CHECK: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_15]] : i32 +! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i32) -> i64 +! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_13]] : (i64) -> index +! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (i64) -> index +! CHECK: %[[VAL_20:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_18]], %[[VAL_20]] : index +! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.ref>) -> !fir.ref>> +! CHECK: %[[VAL_23:.*]] = fir.coordinate_of %[[VAL_22]], %[[VAL_21]] : (!fir.ref>>, index) -> !fir.ref> +! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_23]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_25:.*]] = arith.subi %[[VAL_19]], %[[VAL_18]] : index +! CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_25]], %[[VAL_20]] : index +! CHECK: %[[VAL_27:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_28:.*]] = arith.cmpi slt, %[[VAL_26]], %[[VAL_27]] : index +! CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_28]], %[[VAL_27]], %[[VAL_26]] : index +! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (i32) -> i64 +! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (i32) -> i64 +! 
CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_31]] : (i64) -> index +! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_33]] : (i64) -> index +! CHECK: %[[VAL_36:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_37:.*]] = arith.subi %[[VAL_34]], %[[VAL_36]] : index +! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.ref>) -> !fir.ref>> +! CHECK: %[[VAL_39:.*]] = fir.coordinate_of %[[VAL_38]], %[[VAL_37]] : (!fir.ref>>, index) -> !fir.ref> +! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (!fir.ref>) -> !fir.ref> +! CHECK: %[[VAL_41:.*]] = arith.subi %[[VAL_35]], %[[VAL_34]] : index +! CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_41]], %[[VAL_36]] : index +! CHECK: %[[VAL_43:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_44:.*]] = arith.cmpi slt, %[[VAL_42]], %[[VAL_43]] : index +! CHECK: %[[VAL_45:.*]] = arith.select %[[VAL_44]], %[[VAL_43]], %[[VAL_42]] : index +! CHECK: %[[VAL_46:.*]] = arith.cmpi slt, %[[VAL_45]], %[[VAL_29]] : index +! CHECK: %[[VAL_47:.*]] = arith.select %[[VAL_46]], %[[VAL_45]], %[[VAL_29]] : index +! CHECK: %[[VAL_48:.*]] = arith.constant 1 : i64 +! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_47]] : (index) -> i64 +! CHECK: %[[VAL_50:.*]] = arith.muli %[[VAL_48]], %[[VAL_49]] : i64 +! CHECK: %[[VAL_51:.*]] = arith.constant false +! CHECK: %[[VAL_52:.*]] = fir.convert %[[VAL_40]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_24]] : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_52]], %[[VAL_53]], %[[VAL_50]], %[[VAL_51]]) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: %[[VAL_54:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_55:.*]] = arith.subi %[[VAL_45]], %[[VAL_54]] : index +! CHECK: %[[VAL_56:.*]] = arith.constant 32 : i8 +! CHECK: %[[VAL_57:.*]] = fir.undefined !fir.char<1> +! CHECK: %[[VAL_58:.*]] = fir.insert_value %[[VAL_57]], %[[VAL_56]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1> +! CHECK: %[[VAL_59:.*]] = arith.constant 1 : index +! CHECK: fir.do_loop %[[VAL_60:.*]] = %[[VAL_47]] to %[[VAL_55]] step %[[VAL_59]] { +! CHECK: %[[VAL_61:.*]] = fir.convert %[[VAL_40]] : (!fir.ref>) -> !fir.ref>> +! CHECK: %[[VAL_62:.*]] = fir.coordinate_of %[[VAL_61]], %[[VAL_60]] : (!fir.ref>>, index) -> !fir.ref> +! CHECK: fir.store %[[VAL_58]] to %[[VAL_62]] : !fir.ref> +! CHECK: } +! CHECK: } +! CHECK: return +! CHECK: } + diff --git a/flang/test/Lower/intrinsic-procedures/ieee_is_finite.f90 b/flang/test/Lower/intrinsic-procedures/ieee_is_finite.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d1a2da0d55bb936ba83f9b4ce3cf1b94e93e84d2 --- /dev/null +++ b/flang/test/Lower/intrinsic-procedures/ieee_is_finite.f90 @@ -0,0 +1,68 @@ +! RUN: bbc -emit-fir %s -o - | FileCheck %s + +! CHECK-LABEL: @_QPis_finite_test +subroutine is_finite_test(x, y) + use ieee_arithmetic, only: ieee_is_finite + real(4) x + real(8) y + ! CHECK: %[[V_3:[0-9]+]] = fir.load %arg0 : !fir.ref + ! CHECK: %[[V_4:[0-9]+]] = arith.bitcast %[[V_3]] : f32 to i32 + ! CHECK: %[[V_5:[0-9]+]] = arith.subi %c32{{.*}}, %c8{{.*}} : i32 + ! CHECK: %[[V_6:[0-9]+]] = arith.shrui %c-1{{.*}}, %[[V_5]] : i32 + ! CHECK: %[[V_7:[0-9]+]] = arith.shrsi %[[V_4]], %c23{{.*}} : i32 + ! CHECK: %[[V_8:[0-9]+]] = arith.andi %[[V_7]], %[[V_6]] : i32 + ! CHECK: %[[V_9:[0-9]+]] = arith.cmpi eq, %c8{{.*}}, %c0{{.*}} : i32 + ! CHECK: %[[V_10:[0-9]+]] = arith.select %[[V_9]], %c0{{.*}}, %[[V_8]] : i32 + ! CHECK: %[[V_11:[0-9]+]] = arith.cmpi ne, %[[V_10]], %c255{{.*}} : i32 + ! CHECK: %[[V_12:[0-9]+]] = fir.convert %[[V_11]] : (i1) -> !fir.logical<4> + ! 
CHECK: %[[V_13:[0-9]+]] = fir.convert %[[V_12]] : (!fir.logical<4>) -> i1 + print*, ieee_is_finite(x) + + ! CHECK: %[[V_19:[0-9]+]] = fir.load %arg0 : !fir.ref + ! CHECK: %[[V_20:[0-9]+]] = fir.load %arg0 : !fir.ref + ! CHECK: %[[V_21:[0-9]+]] = arith.addf %[[V_19]], %[[V_20]] : f32 + ! CHECK: %[[V_22:[0-9]+]] = arith.bitcast %[[V_21]] : f32 to i32 + ! CHECK: %[[V_23:[0-9]+]] = arith.subi %c32{{.*}}, %c8{{.*}} : i32 + ! CHECK: %[[V_24:[0-9]+]] = arith.shrui %c-1{{.*}}, %[[V_23]] : i32 + ! CHECK: %[[V_25:[0-9]+]] = arith.shrsi %[[V_22]], %c23{{.*}} : i32 + ! CHECK: %[[V_26:[0-9]+]] = arith.andi %[[V_25]], %[[V_24]] : i32 + ! CHECK: %[[V_27:[0-9]+]] = arith.cmpi eq, %c8{{.*}}, %c0{{.*}} : i32 + ! CHECK: %[[V_28:[0-9]+]] = arith.select %[[V_27]], %c0{{.*}}, %[[V_26]] : i32 + ! CHECK: %[[V_29:[0-9]+]] = arith.cmpi ne, %[[V_28]], %c255{{.*}} : i32 + ! CHECK: %[[V_30:[0-9]+]] = fir.convert %[[V_29]] : (i1) -> !fir.logical<4> + ! CHECK: %[[V_31:[0-9]+]] = fir.convert %[[V_30]] : (!fir.logical<4>) -> i1 + print*, ieee_is_finite(x+x) + + ! CHECK: %[[V_37:[0-9]+]] = fir.load %arg1 : !fir.ref + ! CHECK: %[[V_38:[0-9]+]] = arith.bitcast %[[V_37]] : f64 to i64 + ! CHECK: %[[V_39:[0-9]+]] = arith.subi %c64{{.*}}, %c11{{.*}} : i64 + ! CHECK: %[[V_40:[0-9]+]] = arith.shrui %c-1{{.*}}, %[[V_39]] : i64 + ! CHECK: %[[V_41:[0-9]+]] = arith.shrsi %[[V_38]], %c52{{.*}} : i64 + ! CHECK: %[[V_42:[0-9]+]] = arith.andi %[[V_41]], %[[V_40]] : i64 + ! CHECK: %[[V_43:[0-9]+]] = arith.cmpi eq, %c11{{.*}}, %c0{{.*}} : i64 + ! CHECK: %[[V_44:[0-9]+]] = arith.select %[[V_43]], %c0{{.*}}, %[[V_42]] : i64 + ! CHECK: %[[V_45:[0-9]+]] = arith.cmpi ne, %[[V_44]], %c2047{{.*}} : i64 + ! CHECK: %[[V_46:[0-9]+]] = fir.convert %[[V_45]] : (i1) -> !fir.logical<4> + ! CHECK: %[[V_47:[0-9]+]] = fir.convert %[[V_46]] : (!fir.logical<4>) -> i1 + print*, ieee_is_finite(y) + + ! CHECK: %[[V_53:[0-9]+]] = fir.load %arg1 : !fir.ref + ! CHECK: %[[V_54:[0-9]+]] = fir.load %arg1 : !fir.ref + ! CHECK: %[[V_55:[0-9]+]] = arith.addf %[[V_53]], %[[V_54]] : f64 + ! CHECK: %[[V_56:[0-9]+]] = arith.bitcast %[[V_55]] : f64 to i64 + ! CHECK: %[[V_57:[0-9]+]] = arith.subi %c64{{.*}}, %c11{{.*}} : i64 + ! CHECK: %[[V_58:[0-9]+]] = arith.shrui %c-1{{.*}}, %[[V_57]] : i64 + ! CHECK: %[[V_59:[0-9]+]] = arith.shrsi %[[V_56]], %c52{{.*}} : i64 + ! CHECK: %[[V_60:[0-9]+]] = arith.andi %[[V_59]], %[[V_58]] : i64 + ! CHECK: %[[V_61:[0-9]+]] = arith.cmpi eq, %c11{{.*}}, %c0{{.*}} : i64 + ! CHECK: %[[V_62:[0-9]+]] = arith.select %[[V_61]], %c0{{.*}}, %[[V_60]] : i64 + ! CHECK: %[[V_63:[0-9]+]] = arith.cmpi ne, %[[V_62]], %c2047{{.*}} : i64 + ! CHECK: %[[V_64:[0-9]+]] = fir.convert %[[V_63]] : (i1) -> !fir.logical<4> + ! CHECK: %[[V_65:[0-9]+]] = fir.convert %[[V_64]] : (!fir.logical<4>) -> i1 + print*, ieee_is_finite(y+y) +end subroutine is_finite_test + + real(4) x + real(8) y + call is_finite_test(huge(x), huge(y)) +end diff --git a/flang/test/Lower/intrinsic-procedures/ieee_operator_eq.f90 b/flang/test/Lower/intrinsic-procedures/ieee_operator_eq.f90 new file mode 100644 index 0000000000000000000000000000000000000000..37d0ca02c5fb048b56d2dc602380404762f165f5 --- /dev/null +++ b/flang/test/Lower/intrinsic-procedures/ieee_operator_eq.f90 @@ -0,0 +1,46 @@ +! RUN: bbc -emit-fir %s -o - | FileCheck %s + +! CHECK-LABEL: @_QPs +subroutine s(r1,r2) + use ieee_arithmetic, only: ieee_round_type, operator(==) + type(ieee_round_type) :: r1, r2 + ! CHECK: %[[V_3:[0-9]+]] = fir.field_index mode, !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! 
CHECK: %[[V_4:[0-9]+]] = fir.coordinate_of %arg0, %[[V_3]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: %[[V_5:[0-9]+]] = fir.load %[[V_4]] : !fir.ref + ! CHECK: %[[V_6:[0-9]+]] = fir.coordinate_of %arg1, %[[V_3]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: %[[V_7:[0-9]+]] = fir.load %[[V_6]] : !fir.ref + ! CHECK: %[[V_8:[0-9]+]] = arith.cmpi eq, %[[V_5]], %[[V_7]] : i8 + ! CHECK: %[[V_9:[0-9]+]] = fir.call @_FortranAioOutputLogical(%{{.*}}, %[[V_8]]) : (!fir.ref, i1) -> i1 + print*, r1 == r2 +end + +! CHECK-LABEL: @_QQmain + use ieee_arithmetic, only: ieee_round_type, ieee_nearest, ieee_to_zero + interface + subroutine s(r1,r2) + import ieee_round_type + type(ieee_round_type) :: r1, r2 + end + end interface + ! CHECK: %[[V_0:[0-9]+]] = fir.alloca !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_1:[0-9]+]] = fir.alloca !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_2:[0-9]+]] = fir.alloca !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_3:[0-9]+]] = fir.alloca !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_4:[0-9]+]] = fir.field_index mode, !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_5:[0-9]+]] = fir.coordinate_of %[[V_3]], %[[V_4]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: fir.store %c2{{.*}} to %[[V_5]] : !fir.ref + ! CHECK: %[[V_6:[0-9]+]] = fir.field_index mode, !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_7:[0-9]+]] = fir.coordinate_of %[[V_2]], %[[V_6]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: fir.store %c1{{.*}} to %[[V_7]] : !fir.ref + call s(ieee_to_zero, ieee_nearest) + + ! CHECK: fir.call @_QPs(%[[V_3]], %[[V_2]]) : (!fir.ref>, !fir.ref>) -> () + ! CHECK: %[[V_8:[0-9]+]] = fir.field_index mode, !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_9:[0-9]+]] = fir.coordinate_of %[[V_1]], %[[V_8]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: fir.store %c1{{.*}} to %[[V_9]] : !fir.ref + ! CHECK: %[[V_10:[0-9]+]] = fir.field_index mode, !fir.type<_QMieee_arithmeticTieee_round_type{mode:i8}> + ! CHECK: %[[V_11:[0-9]+]] = fir.coordinate_of %[[V_0]], %[[V_10]] : (!fir.ref>, !fir.field) -> !fir.ref + ! CHECK: fir.store %c1{{.*}} to %[[V_11]] : !fir.ref + ! CHECK: fir.call @_QPs(%[[V_1]], %[[V_0]]) : (!fir.ref>, !fir.ref>) -> () + call s(ieee_nearest, ieee_nearest) +end diff --git a/flang/test/Lower/optional-value-caller.f90 b/flang/test/Lower/optional-value-caller.f90 new file mode 100644 index 0000000000000000000000000000000000000000..72c10dcb5497c3c2e7b09e3b12c9dedd85d79d32 --- /dev/null +++ b/flang/test/Lower/optional-value-caller.f90 @@ -0,0 +1,423 @@ +! Test lowering of OPTIONAL VALUE dummy argument on caller side. +! RUN: bbc -emit-fir %s -o - | FileCheck %s + +! A copy must be made if the actual is a variable (and no copy-out), but care +! has to be taken if the actual argument may be absent at runtime: the copy +! must be conditional. When the allocation is dynamic, the temp allocation and +! deallocation are also conditional.
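+!
+! Illustrative sketch (not part of the checked output): for the scalar case
+! tested below, the conditional copy behaves like the following Fortran,
+! where `tmp` is a hypothetical caller-side temporary:
+!
+!   if (present(i)) then
+!     tmp = i             ! copy-in; VALUE means no copy-out is needed
+!     call scalar(tmp)
+!   else
+!     call scalar()       ! forward the absence: no temp, no deallocation
+!   end if
+!
+! In FIR this appears as a fir.is_present test feeding a fir.if whose
+! branches yield either the filled temporary or fir.absent, as checked in
+! the tests below.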
+ +module test +interface + subroutine scalar(i) + integer, optional, value :: i + end subroutine + subroutine dyn_char(c) + character(*), optional, value :: c + end subroutine + subroutine array(i) + integer, optional, value :: i(100) + end subroutine + subroutine dyn_array(i, n) + integer(8) :: n + integer, optional, value :: i(n) + end subroutine + subroutine dyn_char_array(c, n) + integer(8) :: n + character(*), optional, value :: c(n) + end subroutine + function returns_ptr() + integer, pointer :: returns_ptr + end function +end interface +contains + +! CHECK-LABEL: func @_QMtestPtest_scalar_not_a_var() { +subroutine test_scalar_not_a_var() + call scalar(42) +! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_1:.*]] = arith.constant 42 : i32 +! CHECK: fir.store %[[VAL_1]] to %[[VAL_0]] : !fir.ref +! CHECK: fir.call @_QPscalar(%[[VAL_0]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_scalar( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "i", fir.optional}) { +subroutine test_scalar(i) + integer, optional :: i + call scalar(i) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_0]] : (!fir.ref) -> i1 +! CHECK: %[[VAL_3:.*]] = fir.if %[[VAL_2]] -> (!fir.ref) { +! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: fir.store %[[VAL_4]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.result %[[VAL_1]] : !fir.ref +! CHECK: } else { +! CHECK: %[[VAL_5:.*]] = fir.absent !fir.ref +! CHECK: fir.result %[[VAL_5]] : !fir.ref +! CHECK: } +! CHECK: fir.call @_QPscalar(%[[VAL_6:.*]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_scalar2( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "i", fir.optional}) { +subroutine test_scalar2(i) + integer, optional, value :: i + call scalar(i) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_0]] : (!fir.ref) -> i1 +! CHECK: %[[VAL_3:.*]] = fir.if %[[VAL_2]] -> (!fir.ref) { +! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: fir.store %[[VAL_4]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.result %[[VAL_1]] : !fir.ref +! CHECK: } else { +! CHECK: %[[VAL_5:.*]] = fir.absent !fir.ref +! CHECK: fir.result %[[VAL_5]] : !fir.ref +! CHECK: } +! CHECK: fir.call @_QPscalar(%[[VAL_6:.*]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_scalar3( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "i", fir.optional}) { +subroutine test_scalar3(i) + integer, optional :: i + ! i must be present when it appears in "()" + call scalar((i)) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: %[[VAL_3:.*]] = fir.no_reassoc %[[VAL_2]] : i32 +! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.call @_QPscalar(%[[VAL_1]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_scalar_ptr( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>> {fir.bindc_name = "i"}) { +subroutine test_scalar_ptr(i) + integer, pointer :: i + call scalar(i) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +! CHECK: %[[VAL_3:.*]] = fir.box_addr %[[VAL_2]] : (!fir.box>) -> !fir.ptr +! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (!fir.ptr) -> i64 +! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_6:.*]] = arith.cmpi ne, %[[VAL_4]], %[[VAL_5]] : i64 +! 
CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_7]] : (!fir.box>) -> !fir.ptr +! CHECK: %[[VAL_9:.*]] = fir.if %[[VAL_6]] -> (!fir.ref) { +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_8]] : !fir.ptr +! CHECK: fir.store %[[VAL_10]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.result %[[VAL_1]] : !fir.ref +! CHECK: } else { +! CHECK: %[[VAL_11:.*]] = fir.absent !fir.ref +! CHECK: fir.result %[[VAL_11]] : !fir.ref +! CHECK: } +! CHECK: fir.call @_QPscalar(%[[VAL_12:.*]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_scalar_simple_var( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref {fir.bindc_name = "i"}) { +subroutine test_scalar_simple_var(i) + integer :: i + call scalar(i) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref +! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.call @_QPscalar(%[[VAL_1]]) : (!fir.ref) -> () +end subroutine + + +! CHECK-LABEL: func @_QMtestPtest_scalar_alloc( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>> {fir.bindc_name = "i"}) { +subroutine test_scalar_alloc(i) + integer, allocatable :: i + call scalar(i) +! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +! CHECK: %[[VAL_3:.*]] = fir.box_addr %[[VAL_2]] : (!fir.box>) -> !fir.heap +! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (!fir.heap) -> i64 +! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_6:.*]] = arith.cmpi ne, %[[VAL_4]], %[[VAL_5]] : i64 +! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_7]] : (!fir.box>) -> !fir.heap +! CHECK: %[[VAL_9:.*]] = fir.if %[[VAL_6]] -> (!fir.ref) { +! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_8]] : !fir.heap +! CHECK: fir.store %[[VAL_10]] to %[[VAL_1]] : !fir.ref +! CHECK: fir.result %[[VAL_1]] : !fir.ref +! CHECK: } else { +! CHECK: %[[VAL_11:.*]] = fir.absent !fir.ref +! CHECK: fir.result %[[VAL_11]] : !fir.ref +! CHECK: } +! CHECK: fir.call @_QPscalar(%[[VAL_12:.*]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_ptr_2() { +subroutine test_ptr_2() + call scalar(returns_ptr()) +! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {adapt.valuebyref} +! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box> {bindc_name = ".result"} +! CHECK: %[[VAL_2:.*]] = fir.call @_QPreturns_ptr() : () -> !fir.box> +! CHECK: fir.save_result %[[VAL_2]] to %[[VAL_1]] : !fir.box>, !fir.ref>> +! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref>> +! CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.box>) -> !fir.ptr +! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (!fir.ptr) -> i64 +! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_7:.*]] = arith.cmpi ne, %[[VAL_5]], %[[VAL_6]] : i64 +! CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_1]] : !fir.ref>> +! CHECK: %[[VAL_9:.*]] = fir.box_addr %[[VAL_8]] : (!fir.box>) -> !fir.ptr +! CHECK: %[[VAL_10:.*]] = fir.if %[[VAL_7]] -> (!fir.ref) { +! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_9]] : !fir.ptr +! CHECK: fir.store %[[VAL_11]] to %[[VAL_0]] : !fir.ref +! CHECK: fir.result %[[VAL_0]] : !fir.ref +! CHECK: } else { +! CHECK: %[[VAL_12:.*]] = fir.absent !fir.ref +! CHECK: fir.result %[[VAL_12]] : !fir.ref +! CHECK: } +! CHECK: fir.call @_QPscalar(%[[VAL_13:.*]]) : (!fir.ref) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_array( +! 
CHECK-SAME: %[[VAL_0:.*]]: !fir.ref> {fir.bindc_name = "i", fir.optional}) { +subroutine test_array(i) + integer, optional :: i(100) + call array(i) +! CHECK: %[[VAL_1:.*]] = arith.constant 100 : index +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_0]] : (!fir.ref>) -> i1 +! CHECK: %[[VAL_3:.*]] = fir.if %[[VAL_2]] -> (!fir.heap>) { +! CHECK: %[[VAL_4:.*]] = fir.allocmem !fir.array<100xi32>, %[[VAL_1]] {uniq_name = ".copy"} +! CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_6:.*]] = fir.array_load %[[VAL_4]](%[[VAL_5]]) : (!fir.heap>, !fir.shape<1>) -> !fir.array<100xi32> +! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_8:.*]] = fir.array_load %[[VAL_0]](%[[VAL_7]]) : (!fir.ref>, !fir.shape<1>) -> !fir.array<100xi32> +! CHECK: %[[VAL_9:.*]] = arith.constant 1 : index +! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_11:.*]] = arith.subi %[[VAL_1]], %[[VAL_9]] : index +! CHECK: %[[VAL_12:.*]] = fir.do_loop %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_9]] unordered iter_args(%[[VAL_14:.*]] = %[[VAL_6]]) -> (!fir.array<100xi32>) { +! CHECK: %[[VAL_15:.*]] = fir.array_fetch %[[VAL_8]], %[[VAL_13]] : (!fir.array<100xi32>, index) -> i32 +! CHECK: %[[VAL_16:.*]] = fir.array_update %[[VAL_14]], %[[VAL_15]], %[[VAL_13]] : (!fir.array<100xi32>, i32, index) -> !fir.array<100xi32> +! CHECK: fir.result %[[VAL_16]] : !fir.array<100xi32> +! CHECK: } +! CHECK: fir.array_merge_store %[[VAL_6]], %[[VAL_17:.*]] to %[[VAL_4]] : !fir.array<100xi32>, !fir.array<100xi32>, !fir.heap> +! CHECK: fir.result %[[VAL_4]] : !fir.heap> +! CHECK: } else { +! CHECK: %[[VAL_18:.*]] = fir.zero_bits !fir.heap> +! CHECK: fir.result %[[VAL_18]] : !fir.heap> +! CHECK: } +! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_20:.*]] : (!fir.heap>) -> !fir.ref> +! CHECK: fir.call @_QParray(%[[VAL_19]]) : (!fir.ref>) -> () +! CHECK: fir.if %[[VAL_2]] { +! CHECK: fir.freemem %[[VAL_20]] : !fir.heap> +! CHECK: } +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_array2( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref> {fir.bindc_name = "i", fir.optional}, +subroutine test_array2(i, n) + integer(8) :: n + integer, optional, value :: i(n) + call array(i) +! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref {fir.bindc_name = "n"}) { +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (i64) -> index +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_5:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[VAL_4]] : index +! CHECK: %[[VAL_6:.*]] = arith.select %[[VAL_5]], %[[VAL_3]], %[[VAL_4]] : index +! CHECK: %[[VAL_7:.*]] = fir.is_present %[[VAL_0]] : (!fir.ref>) -> i1 +! CHECK: %[[VAL_8:.*]] = fir.if %[[VAL_7]] -> (!fir.heap>) { +! CHECK: %[[VAL_9:.*]] = fir.allocmem !fir.array, %[[VAL_6]] {uniq_name = ".copy"} +! CHECK: %[[VAL_17:.*]] = fir.do_loop +! CHECK: } +! CHECK: fir.array_merge_store %{{.*}}, %[[VAL_17]] to %[[VAL_9]] : !fir.array, !fir.array, !fir.heap> +! CHECK: fir.result %[[VAL_9]] : !fir.heap> +! CHECK: } else { +! CHECK: %[[VAL_23:.*]] = fir.zero_bits !fir.heap> +! CHECK: fir.result %[[VAL_23]] : !fir.heap> +! CHECK: } +! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_8]] : (!fir.heap>) -> !fir.ref> +! CHECK: fir.call @_QParray(%[[VAL_24]]) : (!fir.ref>) -> () +! CHECK: fir.if %[[VAL_7]] { +! CHECK: fir.freemem %[[VAL_8]] : !fir.heap> +! CHECK: } +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_dyn_array( +! 
CHECK-SAME: %[[VAL_0:.*]]: !fir.ref> {fir.bindc_name = "i", fir.optional}, +! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref {fir.bindc_name = "n"}) { +subroutine test_dyn_array(i, n) + integer(8) :: n + integer, optional :: i(n) + call dyn_array(i, n) +! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref +! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (i64) -> index +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_5:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[VAL_4]] : index +! CHECK: %[[VAL_6:.*]] = arith.select %[[VAL_5]], %[[VAL_3]], %[[VAL_4]] : index +! CHECK: %[[VAL_7:.*]] = fir.is_present %[[VAL_0]] : (!fir.ref>) -> i1 +! CHECK: %[[VAL_8:.*]] = fir.if %[[VAL_7]] -> (!fir.heap>) { +! CHECK: %[[VAL_9:.*]] = fir.allocmem !fir.array, %{{.*}} {uniq_name = ".copy"} +! CHECK: %[[VAL_17:.*]] = fir.do_loop +! CHECK: } +! CHECK: fir.array_merge_store %{{.*}}, %[[VAL_17]] to %[[VAL_9]] : !fir.array, !fir.array, !fir.heap> +! CHECK: fir.result %[[VAL_9]] : !fir.heap> +! CHECK: } else { +! CHECK: %[[VAL_23:.*]] = fir.zero_bits !fir.heap> +! CHECK: fir.result %[[VAL_23]] : !fir.heap> +! CHECK: } +! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_8]] : (!fir.heap>) -> !fir.ref> +! CHECK: fir.call @_QPdyn_array(%[[VAL_24]], %[[VAL_1]]) : (!fir.ref>, !fir.ref) -> () +! CHECK: fir.if %[[VAL_7]] { +! CHECK: fir.freemem %[[VAL_8]] : !fir.heap> +! CHECK: } +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_dyn_array_from_assumed( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.box> {fir.bindc_name = "i", fir.optional}, +! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref {fir.bindc_name = "n"}) { +subroutine test_dyn_array_from_assumed(i, n) + integer(8) :: n + integer, optional :: i(:) + call dyn_array(i, n) +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_0]] : (!fir.box>) -> i1 +! CHECK: %[[VAL_3:.*]] = fir.zero_bits !fir.ref> +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_6:.*]] = fir.embox %[[VAL_3]](%[[VAL_5]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +! CHECK: %[[VAL_7:.*]] = arith.select %[[VAL_2]], %[[VAL_0]], %[[VAL_6]] : !fir.box> +! CHECK: %[[VAL_8:.*]] = fir.if %[[VAL_2]] -> (!fir.heap>) { +! CHECK: %[[VAL_9:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_10:.*]]:3 = fir.box_dims %[[VAL_7]], %[[VAL_9]] : (!fir.box>, index) -> (index, index, index) +! CHECK: %[[VAL_11:.*]] = fir.allocmem !fir.array, %[[VAL_10]]#1 {uniq_name = ".copy"} +! CHECK: %[[VAL_18:.*]] = fir.do_loop +! CHECK: } +! CHECK: fir.array_merge_store %{{.*}}, %[[VAL_18]] to %[[VAL_11]] : !fir.array, !fir.array, !fir.heap> +! CHECK: fir.result %[[VAL_11]] : !fir.heap> +! CHECK: } else { +! CHECK: %[[VAL_24:.*]] = fir.zero_bits !fir.heap> +! CHECK: fir.result %[[VAL_24]] : !fir.heap> +! CHECK: } +! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_8]] : (!fir.heap>) -> !fir.ref> +! CHECK: fir.call @_QPdyn_array(%[[VAL_25]], %[[VAL_1]]) : (!fir.ref>, !fir.ref) -> () +! CHECK: fir.if %[[VAL_2]] { +! CHECK: fir.freemem %[[VAL_8]] : !fir.heap> +! CHECK: } +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_array_ptr( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>> {fir.bindc_name = "i"}) { +subroutine test_array_ptr(i) + integer, pointer :: i(:) + call array(i) +! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> +! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>>) -> !fir.ptr> +! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (!fir.ptr>) -> i64 +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i64 +! 
CHECK: %[[VAL_5:.*]] = arith.cmpi ne, %[[VAL_3]], %[[VAL_4]] : i64 +! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> +! CHECK: %[[VAL_7:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_8:.*]]:3 = fir.box_dims %[[VAL_6]], %[[VAL_7]] : (!fir.box>>, index) -> (index, index, index) +! CHECK: %[[VAL_9:.*]] = fir.if %[[VAL_5]] -> (!fir.heap>) { +! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_11:.*]]:3 = fir.box_dims %[[VAL_6]], %[[VAL_10]] : (!fir.box>>, index) -> (index, index, index) +! CHECK: %[[VAL_12:.*]] = fir.allocmem !fir.array, %[[VAL_11]]#1 {uniq_name = ".copy"} +! CHECK: %[[VAL_20:.*]] = fir.do_loop +! CHECK: } +! CHECK: fir.array_merge_store %{{.*}}, %[[VAL_20]] to %[[VAL_12]] : !fir.array, !fir.array, !fir.heap> +! CHECK: fir.result %[[VAL_12]] : !fir.heap> +! CHECK: } else { +! CHECK: %[[VAL_26:.*]] = fir.zero_bits !fir.heap> +! CHECK: fir.result %[[VAL_26]] : !fir.heap> +! CHECK: } +! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_9]] : (!fir.heap>) -> !fir.ref> +! CHECK: fir.call @_QParray(%[[VAL_27]]) : (!fir.ref>) -> () +! CHECK: fir.if %[[VAL_5]] { +! CHECK: fir.freemem %[[VAL_9]] : !fir.heap> +! CHECK: } +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_char( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxchar<1> {fir.bindc_name = "c", fir.optional}) { +subroutine test_char(c) + character(*), optional :: c + call dyn_char(c) +! CHECK: %[[VAL_1:.*]]:2 = fir.unboxchar %[[VAL_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_1]]#0 : (!fir.ref>) -> i1 +! CHECK: %[[VAL_3:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_4:.*]] = arith.select %[[VAL_2]], %[[VAL_1]]#1, %[[VAL_3]] : index +! CHECK: %[[VAL_5:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_4]] : index) {adapt.valuebyref} +! CHECK: %[[VAL_6:.*]] = fir.if %[[VAL_2]] -> (!fir.ref>) { +! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_1]]#0 : (!fir.ref>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_13]], %[[VAL_14]], %{{.*}}, %{{.*}}) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: fir.result %[[VAL_5]] : !fir.ref> +! CHECK: } else { +! CHECK: %[[VAL_24:.*]] = fir.absent !fir.ref> +! CHECK: fir.result %[[VAL_24]] : !fir.ref> +! CHECK: } +! CHECK: %[[VAL_25:.*]] = fir.emboxchar %[[VAL_6]], %[[VAL_4]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: fir.call @_QPdyn_char(%[[VAL_25]]) : (!fir.boxchar<1>) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_char_ptr( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>> {fir.bindc_name = "c"}) { +subroutine test_char_ptr(c) + character(:), pointer :: c + call dyn_char(c) +! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> +! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>>) -> !fir.ptr> +! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (!fir.ptr>) -> i64 +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_5:.*]] = arith.cmpi ne, %[[VAL_3]], %[[VAL_4]] : i64 +! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> +! CHECK: %[[VAL_7:.*]] = fir.box_elesize %[[VAL_6]] : (!fir.box>>) -> index +! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_6]] : (!fir.box>>) -> !fir.ptr> +! CHECK: %[[VAL_9:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_10:.*]] = arith.select %[[VAL_5]], %[[VAL_7]], %[[VAL_9]] : index +! CHECK: %[[VAL_11:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_10]] : index) {adapt.valuebyref} +! CHECK: %[[VAL_12:.*]] = fir.if %[[VAL_5]] -> (!fir.ref>) { +! 
CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_11]] : (!fir.ref>) -> !fir.ref +! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_8]] : (!fir.ptr>) -> !fir.ref +! CHECK: fir.call @llvm.memmove.p0.p0.i64(%[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) : (!fir.ref, !fir.ref, i64, i1) -> () +! CHECK: fir.result %[[VAL_11]] : !fir.ref> +! CHECK: } else { +! CHECK: %[[VAL_30:.*]] = fir.absent !fir.ref> +! CHECK: fir.result %[[VAL_30]] : !fir.ref> +! CHECK: } +! CHECK: %[[VAL_31:.*]] = fir.emboxchar %[[VAL_12]], %[[VAL_10]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: fir.call @_QPdyn_char(%[[VAL_31]]) : (!fir.boxchar<1>) -> () +end subroutine + +! CHECK-LABEL: func @_QMtestPtest_char_array( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.box>> {fir.bindc_name = "c", fir.optional}) { +subroutine test_char_array(c) + integer(8) :: n + character(*), optional :: c(:) + call dyn_char_array(c, n) +! CHECK: %[[VAL_1:.*]] = fir.alloca i64 {bindc_name = "n", uniq_name = "_QMtestFtest_char_arrayEn"} +! CHECK: %[[VAL_2:.*]] = fir.is_present %[[VAL_0]] : (!fir.box>>) -> i1 +! CHECK: %[[VAL_3:.*]] = fir.zero_bits !fir.ref>> +! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1> +! CHECK: %[[VAL_6:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_7:.*]] = fir.embox %[[VAL_3]](%[[VAL_5]]) typeparams %[[VAL_6]] : (!fir.ref>>, !fir.shape<1>, index) -> !fir.box>> +! CHECK: %[[VAL_8:.*]] = arith.select %[[VAL_2]], %[[VAL_0]], %[[VAL_7]] : !fir.box>> +! CHECK: %[[VAL_9:.*]] = fir.if %[[VAL_2]] -> (!fir.heap>>) { +! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_11:.*]]:3 = fir.box_dims %[[VAL_8]], %[[VAL_10]] : (!fir.box>>, index) -> (index, index, index) +! CHECK: %[[VAL_12:.*]] = fir.box_elesize %[[VAL_8]] : (!fir.box>>) -> index +! CHECK: %[[VAL_13:.*]] = fir.allocmem !fir.array>(%[[VAL_12]] : index), %[[VAL_11]]#1 {uniq_name = ".copy"} +! CHECK: %[[VAL_20:.*]] = fir.do_loop {{.*}} +! CHECK: fir.call @llvm.memmove.p0.p0.i64 +! CHECK: } +! CHECK: fir.array_merge_store %{{.*}}, %[[VAL_20]] to %[[VAL_13]] typeparams %[[VAL_12]] : !fir.array>, !fir.array>, !fir.heap>>, index +! CHECK: fir.result %[[VAL_13]] : !fir.heap>> +! CHECK: } else { +! CHECK: %[[VAL_45:.*]] = fir.zero_bits !fir.heap>> +! CHECK: fir.result %[[VAL_45]] : !fir.heap>> +! CHECK: } +! CHECK: %[[VAL_46:.*]] = fir.box_elesize %[[VAL_8]] : (!fir.box>>) -> index +! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_9]] : (!fir.heap>>) -> !fir.ref> +! CHECK: %[[VAL_49:.*]] = fir.emboxchar %[[VAL_47]], %[[VAL_46]] : (!fir.ref>, index) -> !fir.boxchar<1> +! CHECK: fir.call @_QPdyn_char_array(%[[VAL_49]], %[[VAL_1]]) : (!fir.boxchar<1>, !fir.ref) -> () +! CHECK: fir.if %[[VAL_2]] { +! CHECK: fir.freemem %[[VAL_9]] : !fir.heap>> +! 
CHECK: } +end subroutine +end diff --git a/flang/test/Semantics/misc-intrinsics.f90 b/flang/test/Semantics/misc-intrinsics.f90 index 0dc65fbd098cb93cf88ea95e46cd5ed76be208af..b454d4764e411431d95a34deae534d1aa2f48c3d 100644 --- a/flang/test/Semantics/misc-intrinsics.f90 +++ b/flang/test/Semantics/misc-intrinsics.f90 @@ -19,5 +19,8 @@ program test_size print *, size(array) print *, ubound(array) print *, lbound(array) + print *, size(arg(:,1)) + print *, ubound(arg(:,1)) + print *, shape(arg(:,1)) end subroutine end diff --git a/flang/unittests/Evaluate/real.cpp b/flang/unittests/Evaluate/real.cpp index 1974f42624415146de4f5f5bfd14abacaac9ef12..60e5710b52a43a95794badb0e489701d4bda30d7 100644 --- a/flang/unittests/Evaluate/real.cpp +++ b/flang/unittests/Evaluate/real.cpp @@ -392,6 +392,22 @@ void subsetTests(int pass, Rounding rounding, std::uint32_t opds) { ("%d AINT(0x%jx)", pass, static_cast(rj)); } + { + ValueWithRealFlags root{x.SQRT(rounding)}; +#ifndef __clang__ // broken and also slow + fpenv.ClearFlags(); +#endif + FLT fcheck{std::sqrt(fj)}; + auto actualFlags{FlagsToBits(fpenv.CurrentFlags())}; + u.f = fcheck; + UINT rcheck{NormalizeNaN(u.ui)}; + UINT check = root.value.RawBits().ToUInt64(); + MATCH(rcheck, check) + ("%d SQRT(0x%jx)", pass, static_cast(rj)); + MATCH(actualFlags, FlagsToBits(root.flags)) + ("%d SQRT(0x%jx)", pass, static_cast(rj)); + } + { MATCH(IsNaN(rj), x.IsNotANumber()) ("%d IsNaN(0x%jx)", pass, static_cast(rj)); diff --git a/flang/unittests/Runtime/NumericalFormatTest.cpp b/flang/unittests/Runtime/NumericalFormatTest.cpp index d48e29bcc6e97d8a540621d0fe57a76722cd5d6e..40191137fe189cb49bbb5840b314e5a5425db7c1 100644 --- a/flang/unittests/Runtime/NumericalFormatTest.cpp +++ b/flang/unittests/Runtime/NumericalFormatTest.cpp @@ -394,9 +394,9 @@ TEST(IOApiTests, FormatDoubleValues) { {"(E62.55,';')", " 0.1000000000000000055511151231257827021181583404541015625E+" "00;"}, - {"(E0.0,';')", "0.E+00;"}, + {"(E0.0,';')", ".1E+00;"}, {"(E0.55,';')", - "0.1000000000000000055511151231257827021181583404541015625E+" + ".1000000000000000055511151231257827021181583404541015625E+" "00;"}, {"(E0,';')", ".1E+00;"}, {"(F58.55,';')", @@ -491,7 +491,7 @@ TEST(IOApiTests, FormatDoubleValues) { "701797267771758512566055119913150489110145103786273816725095" "583738973359899366480994116420570263709027924276754456522908" "75386825064197182655334472656250-323;"}, - {"(G0,';')", ".5-323;"}, + {"(G0,';')", ".5E-323;"}, {"(E757.750,';')", " 0." 
"494065645841246544176568792868221372365059802614324764425585" @@ -586,7 +586,7 @@ TEST(IOApiTests, FormatDoubleValues) { "408698898317506783884692609277397797285865965494109136909540" "61364675687023986783152906809846172109246253967285156250-" "307;"}, - {"(G0,';')", ".22250738585072014-307;"}, + {"(G0,';')", ".22250738585072014E-307;"}, }}, {// greatest finite 0x7fefffffffffffffuLL, @@ -616,7 +616,7 @@ TEST(IOApiTests, FormatDoubleValues) { "090389328944075868508455133942304583236903222948165808559332" "123348274797826204144723168738177180919299881250404026184124" "8583680000+306;"}, - {"(G0,';')", ".17976931348623157+309;"}, + {"(G0,';')", ".17976931348623157E+309;"}, }}, }; diff --git a/libc/config/darwin/arm/entrypoints.txt b/libc/config/darwin/arm/entrypoints.txt index a8f882f04cc21175da1ab917a281abdf4625d3ac..5c44d6a20c02357303664ea35c032a076231b4a9 100644 --- a/libc/config/darwin/arm/entrypoints.txt +++ b/libc/config/darwin/arm/entrypoints.txt @@ -131,6 +131,8 @@ set(TARGET_LIBM_ENTRYPOINTS libc.src.math.fmin libc.src.math.fminf libc.src.math.fminl + libc.src.math.fmod + libc.src.math.fmodf libc.src.math.frexp libc.src.math.frexpf libc.src.math.frexpl diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt index 01b4fac24af78917b792178da5e83a996d1f9316..7b1abab3b395e7b5e616c7cd2f6458a6f416a987 100644 --- a/libc/config/linux/aarch64/entrypoints.txt +++ b/libc/config/linux/aarch64/entrypoints.txt @@ -150,6 +150,8 @@ set(TARGET_LIBM_ENTRYPOINTS libc.src.math.fmin libc.src.math.fminf libc.src.math.fminl + libc.src.math.fmod + libc.src.math.fmodf libc.src.math.frexp libc.src.math.frexpf libc.src.math.frexpl diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt new file mode 100644 index 0000000000000000000000000000000000000000..39341224a386fd145d7ec39f5b13190f8035a956 --- /dev/null +++ b/libc/config/linux/arm/entrypoints.txt @@ -0,0 +1,63 @@ +set(TARGET_LIBC_ENTRYPOINTS + # ctype.h entrypoints + libc.src.ctype.isalnum + libc.src.ctype.isalpha + libc.src.ctype.isascii + libc.src.ctype.isblank + libc.src.ctype.iscntrl + libc.src.ctype.isdigit + libc.src.ctype.isgraph + libc.src.ctype.islower + libc.src.ctype.isprint + libc.src.ctype.ispunct + libc.src.ctype.isspace + libc.src.ctype.isupper + libc.src.ctype.isxdigit + libc.src.ctype.toascii + libc.src.ctype.tolower + libc.src.ctype.toupper + + # string.h entrypoints + libc.src.string.stpncpy + libc.src.string.strcat + libc.src.string.strchr + libc.src.string.strcmp + libc.src.string.strcpy + libc.src.string.strcspn + libc.src.string.strlcat + libc.src.string.strlcpy + libc.src.string.strlen + libc.src.string.strncat + libc.src.string.strncmp + libc.src.string.strncpy + libc.src.string.strnlen + libc.src.string.strpbrk + libc.src.string.strrchr + libc.src.string.strspn + libc.src.string.strstr + libc.src.string.strtok + libc.src.string.strtok_r + + # inttypes.h entrypoints + libc.src.inttypes.imaxdiv + libc.src.inttypes.strtoimax + libc.src.inttypes.strtoumax + + # stdlib.h entrypoints + libc.src.stdlib.abs + libc.src.stdlib.bsearch + libc.src.stdlib.div + libc.src.stdlib.labs + libc.src.stdlib.ldiv + libc.src.stdlib.llabs + libc.src.stdlib.lldiv + libc.src.stdlib.qsort +) + +set(TARGET_LIBM_ENTRYPOINTS +) + +set(TARGET_LLVMLIBC_ENTRYPOINTS + ${TARGET_LIBC_ENTRYPOINTS} + ${TARGET_LIBM_ENTRYPOINTS} +) diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index 
c744e37fae49daad80c6d2ea2eafec91df713f3b..e6a63ec2cd5b55651cf20d69acfeadeb6a00f150 100644 --- a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -156,6 +156,8 @@ set(TARGET_LIBM_ENTRYPOINTS libc.src.math.fmax libc.src.math.fmaxf libc.src.math.fmaxl + libc.src.math.fmod + libc.src.math.fmodf libc.src.math.frexp libc.src.math.frexpf libc.src.math.frexpl diff --git a/libc/config/windows/entrypoints.txt b/libc/config/windows/entrypoints.txt index 8d7cf6713c86ece770352a16e0dfa111a2cf3aa8..503c1f120da2311de65d9ff288f138ee63817c6b 100644 --- a/libc/config/windows/entrypoints.txt +++ b/libc/config/windows/entrypoints.txt @@ -133,6 +133,8 @@ set(TARGET_LIBM_ENTRYPOINTS libc.src.math.fmax libc.src.math.fmaxf libc.src.math.fmaxl + libc.src.math.fmod + libc.src.math.fmodf libc.src.math.frexp libc.src.math.frexpf libc.src.math.frexpl diff --git a/libc/docs/math.rst b/libc/docs/math.rst index 2603ffb1ff73630eaf5ddb6ef7fd0e118c09076c..42c402ffda2a876dd171efd18a3e6487b730ee24 100644 --- a/libc/docs/math.rst +++ b/libc/docs/math.rst @@ -76,7 +76,7 @@ fdim |check| |check| |check| floor |check| |check| |check| fmax |check| |check| |check| fmin |check| |check| |check| -fmod +fmod |check| |check| fpclassify frexp |check| |check| |check| ilogb |check| |check| |check| diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td index 90218443d0991f0c542cc50751f1d461d45b9a31..72809cc694cf2fba13defca46de3223c1b5bb483 100644 --- a/libc/spec/stdc.td +++ b/libc/spec/stdc.td @@ -378,6 +378,10 @@ def StdC : StandardSpec<"stdc"> { FunctionSpec<"fma", RetValSpec, [ArgSpec, ArgSpec, ArgSpec]>, FunctionSpec<"fmaf", RetValSpec, [ArgSpec, ArgSpec, ArgSpec]>, + FunctionSpec<"fmod", RetValSpec, [ArgSpec, ArgSpec]>, + + FunctionSpec<"fmodf", RetValSpec, [ArgSpec, ArgSpec]>, + FunctionSpec<"frexp", RetValSpec, [ArgSpec, ArgSpec]>, FunctionSpec<"frexpf", RetValSpec, [ArgSpec, ArgSpec]>, FunctionSpec<"frexpl", RetValSpec, [ArgSpec, ArgSpec]>, diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt index 24c6372187234673f5177b4f1b8f59c43ed9d6a2..4812f27a9819eec463711293e5ae857b2007d996 100644 --- a/libc/src/__support/CMakeLists.txt +++ b/libc/src/__support/CMakeLists.txt @@ -45,6 +45,7 @@ add_header_library( libc.include.errno libc.src.errno.errno libc.src.__support.CPP.limits + libc.src.__support.CPP.uint128 libc.src.__support.FPUtil.fputil ) diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt index 294a46b1e9a74bfc3d2be58dadc394402f766ee3..6920a0c2df926fa83135fefbc5dcb76ca5217333 100644 --- a/libc/src/__support/CPP/CMakeLists.txt +++ b/libc/src/__support/CPP/CMakeLists.txt @@ -4,6 +4,22 @@ add_header_library( Array.h ) +add_header_library( + uint + HDRS + UInt.h + DEPENDS + .array +) + +add_header_library( + uint128 + HDRS + UInt128.h + DEPENDS + .uint +) + add_header_library( array_ref HDRS @@ -32,6 +48,8 @@ add_header_library( limits HDRS Limits.h + DEPENDS + .uint ) add_header_library( @@ -44,6 +62,8 @@ add_header_library( type_traits HDRS TypeTraits.h + DEPENDS + .uint ) add_header_library( @@ -77,11 +97,3 @@ add_header_library( HDRS error.h ) - -add_header_library( - uint - HDRS - UInt.h - DEPENDS - libc.src.__support.CPP.array -) diff --git a/libc/src/__support/CPP/Limits.h b/libc/src/__support/CPP/Limits.h index 06104e99bbce98643f06056ff11727ca639be248..8f27d075eae2af505d6c437479045654c6cfa969 100644 --- a/libc/src/__support/CPP/Limits.h +++ b/libc/src/__support/CPP/Limits.h @@ -9,6 +9,8 @@ #ifndef 
LLVM_LIBC_SRC_SUPPORT_CPP_LIMITS_H #define LLVM_LIBC_SRC_SUPPORT_CPP_LIMITS_H +#include "UInt.h" + #include namespace __llvm_libc { @@ -72,18 +74,26 @@ public: static constexpr unsigned char max() { return UCHAR_MAX; } static constexpr unsigned char min() { return 0; } }; +// This specialization enables two things: +// 1. On platforms where UInt128 resolves to UInt<128>, this specialization +// provides limits of UInt128. +// 2. On platforms where UInt128 resolves to __uint128_t, this specialization +// allows us to unittest UInt<128>. +template <> class NumericLimits> { +public: + static constexpr UInt<128> max() { return ~UInt<128>(0); } + static constexpr UInt<128> min() { return 0; } +}; #ifdef __SIZEOF_INT128__ +// On platform where UInt128 resolves to __uint128_t, this specialization +// provides the limits of UInt128. template <> class NumericLimits<__uint128_t> { public: static constexpr __uint128_t max() { return ~__uint128_t(0); } static constexpr __uint128_t min() { return 0; } }; -template <> class NumericLimits<__int128_t> { -public: - static constexpr __int128_t max() { return ~__uint128_t(0) >> 1; } - static constexpr __int128_t min() { return __int128_t(1) << 127; } -}; #endif + } // namespace cpp } // namespace __llvm_libc diff --git a/libc/src/__support/CPP/TypeTraits.h b/libc/src/__support/CPP/TypeTraits.h index 61f55ff373e352f4d7079e4ed6eefb80e83c171c..fba5f04fc73c75e2ad4da9b0ea752027091019ee 100644 --- a/libc/src/__support/CPP/TypeTraits.h +++ b/libc/src/__support/CPP/TypeTraits.h @@ -56,9 +56,13 @@ template struct IsIntegral { IsSameV || IsSameV || IsSameV || IsSameV || IsSameV || IsSameV || - IsSameV, TypeNoCV> + // We need to include UInt<128> and __uint128_t when available because + // we want to unittest UInt<128>. If we include only UInt128, then on + // platform where it resolves to __uint128_t, we cannot unittest + // UInt<128>. + IsSameV<__llvm_libc::cpp::UInt<128>, TypeNoCV> #ifdef __SIZEOF_INT128__ - || IsSameV<__uint128_t, TypeNoCV> || IsSameV<__int128_t, TypeNoCV> + || IsSameV<__uint128_t, TypeNoCV> #endif ; }; diff --git a/libc/src/__support/CPP/UInt.h b/libc/src/__support/CPP/UInt.h index 8e002bdae0de0498affaf5447f05a809500ad271..045950b0a96bfea078aa852497cba2b9907a464f 100644 --- a/libc/src/__support/CPP/UInt.h +++ b/libc/src/__support/CPP/UInt.h @@ -89,6 +89,11 @@ public: return result; } + constexpr UInt operator+=(const UInt &other) { + *this = *this + other; + return *this; + } + // Multiply this number with x and store the result in this number. It is // implemented using the long multiplication algorithm by splitting the // 64-bit words of this number and |x| in to 32-bit halves but peforming @@ -158,6 +163,11 @@ public: return result; } + constexpr UInt &operator*=(const UInt &other) { + *this = *this * other; + return *this; + } + constexpr void shift_left(size_t s) { const size_t drop = s / 64; // Number of words to drop const size_t shift = s % 64; // Bits to shift in the remaining words. @@ -183,6 +193,11 @@ public: return result; } + constexpr UInt &operator<<=(size_t s) { + shift_left(s); + return *this; + } + constexpr void shift_right(size_t s) { const size_t drop = s / 64; // Number of words to drop const size_t shift = s % 64; // Bit shift in the remaining words. 
@@ -208,6 +223,11 @@ public: return result; } + constexpr UInt &operator>>=(size_t s) { + shift_right(s); + return *this; + } + constexpr UInt operator&(const UInt &other) const { UInt result; for (size_t i = 0; i < WordCount; ++i) @@ -215,6 +235,12 @@ public: return result; } + constexpr UInt &operator&=(const UInt &other) { + for (size_t i = 0; i < WordCount; ++i) + val[i] &= other.val[i]; + return *this; + } + constexpr UInt operator|(const UInt &other) const { UInt result; for (size_t i = 0; i < WordCount; ++i) @@ -222,6 +248,12 @@ public: return result; } + constexpr UInt &operator|=(const UInt &other) { + for (size_t i = 0; i < WordCount; ++i) + val[i] |= other.val[i]; + return *this; + } + constexpr UInt operator^(const UInt &other) const { UInt result; for (size_t i = 0; i < WordCount; ++i) @@ -229,6 +261,19 @@ public: return result; } + constexpr UInt &operator^=(const UInt &other) { + for (size_t i = 0; i < WordCount; ++i) + val[i] ^= other.val[i]; + return *this; + } + + constexpr UInt operator~() const { + UInt result; + for (size_t i = 0; i < WordCount; ++i) + result.val[i] = ~val[i]; + return result; + } + constexpr bool operator==(const UInt &other) const { for (size_t i = 0; i < WordCount; ++i) { if (val[i] != other.val[i]) @@ -297,6 +342,12 @@ public: return true; } + constexpr UInt &operator++() { + UInt one(1); + add(one); + return *this; + } + // Return the i-th 64-bit word of the number. constexpr const uint64_t &operator[](size_t i) const { return val[i]; } @@ -345,10 +396,4 @@ constexpr UInt<128> UInt<128>::operator*(const UInt<128> &other) const { } // namespace cpp } // namespace __llvm_libc -/* TODO: determine the best way to support uint128 using this class. -#if !defined(__SIZEOF_INT128__) -using __uint128_t = __llvm_libc::internal::UInt<128>; -#endif // uint128 is not defined, define it with this class. -*/ - #endif // LLVM_LIBC_UTILS_CPP_UINT_H diff --git a/libc/src/__support/CPP/UInt128.h b/libc/src/__support/CPP/UInt128.h new file mode 100644 index 0000000000000000000000000000000000000000..6cb4f9c61f8388f0cf747a4be92d8429c3941297 --- /dev/null +++ b/libc/src/__support/CPP/UInt128.h @@ -0,0 +1,20 @@ +//===-- A 128 bit unsigned int type -----------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_SUPPORT_CPP_UINT128_H +#define LLVM_LIBC_SRC_SUPPORT_CPP_UINT128_H + +#include "UInt.h" + +#if !defined(__SIZEOF_INT128__) +using UInt128 = __llvm_libc::cpp::UInt<128>; +#else +using UInt128 = __uint128_t; +#endif + +#endif // LLVM_LIBC_SRC_SUPPORT_CPP_UINT128_H diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt index fbde58ba78ae5346b97aaaffa5323a5b601975b2..12f39d3087049bbcf3cfaadeba2a1a41884a1211 100644 --- a/libc/src/__support/FPUtil/CMakeLists.txt +++ b/libc/src/__support/FPUtil/CMakeLists.txt @@ -12,15 +12,15 @@ add_header_library( NearestIntegerOperations.h NormalFloat.h PlatformDefs.h - UInt.h builtin_wrappers.h DEPENDS libc.include.math libc.include.errno libc.include.fenv libc.src.__support.common - libc.src.__support.CPP.type_traits libc.src.__support.CPP.bit + libc.src.__support.CPP.type_traits + libc.src.__support.CPP.uint128 libc.src.errno.errno ) diff --git a/libc/src/__support/FPUtil/FPBits.h b/libc/src/__support/FPUtil/FPBits.h index 6743fee9b1a480ac5d828ff4652b25b92a1997a0..f972b97de1b0f218f23e4ccbd127fbba35f1c190 100644 --- a/libc/src/__support/FPUtil/FPBits.h +++ b/libc/src/__support/FPUtil/FPBits.h @@ -13,6 +13,8 @@ #include "src/__support/CPP/Bit.h" #include "src/__support/CPP/TypeTraits.h" +#include "src/__support/FPUtil/builtin_wrappers.h" +#include "src/__support/common.h" #include "FloatProperties.h" #include <stdint.h> @@ -133,6 +135,11 @@ template <typename T> struct FPBits { return (bits & FloatProp::EXP_MANT_MASK) > FloatProp::EXPONENT_MASK; } + bool is_quiet_nan() const { + return (bits & FloatProp::EXP_MANT_MASK) == + (FloatProp::EXPONENT_MASK | FloatProp::QUIET_NAN_MASK); + } + bool is_inf_or_nan() const { return (bits & FloatProp::EXPONENT_MASK) == FloatProp::EXPONENT_MASK; } @@ -160,6 +167,33 @@ template <typename T> struct FPBits { bits.set_mantissa(v); return T(bits); } + + // The function converts an integer mantissa and an unbiased (raw) exponent + // to the floating point type T: + // Result = number * 2^(ep + 1 - exponent_bias), + // where "number" is read as a fixed-point value with MANTISSA_WIDTH + // fractional bits (so a plain integer "number" yields + // number * 2^(ep + 1 - exponent_bias - MANTISSA_WIDTH)). + // Be careful: + // 1) "ep" is a raw exponent value. + // 2) The function adds +1 to ep for a seamless normalized-to-denormalized + // transition. + // 3) The function does not check the upper exponent limit. + // 4) A "number" equal to zero is not processed correctly. + // 5) "number" is unsigned, so the result can only be positive.
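To make the formula above concrete before the implementation that follows: for `float`, EXPONENT_WIDTH is 8, MANTISSA_WIDTH is 23 and the exponent bias is 127, so an integer "number" produces number * 2^(ep + 1 - 150). Below is a small standalone sketch (ours, illustrative only, not the patch's code) that mirrors the same normalization and bit packing for `float`:

```cpp
// Illustrative only: mirrors make_value's normalization for float.
#include <cassert>
#include <cstdint>
#include <cstring>

static float make_float(uint32_t number, int ep) {
  assert(number != 0); // zero is not handled, as noted above
  // Move the leading 1 of "number" to the implicit-bit position (bit 23):
  // the top of the word holds 1 sign bit plus 8 exponent bits.
  int lz = __builtin_clz(number) - 8;
  number <<= lz;
  ep -= lz;
  uint32_t bits;
  if (ep >= 0) {
    // Normal: mask off the implicit bit, store the biased exponent ep + 1.
    bits = (number & 0x007FFFFF) | (uint32_t(ep + 1) << 23);
  } else {
    // Denormal: shift the mantissa right instead of using the exponent.
    bits = number >> -ep;
  }
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  assert(make_float(3, 148) == 1.5f);     // 3 * 2^(148 + 1 - 150)
  assert(make_float(1, 149) == 1.0f);     // 1 * 2^(149 + 1 - 150)
  assert(make_float(1, 20) == 0x1p-129f); // denormal: 2^(20 + 1 - 150)
  return 0;
}
```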
+ inline static constexpr FPBits make_value(UIntType number, int ep) { + FPBits result; + // offset: +1 for sign, but -1 for implicit first bit + int lz = fputil::unsafe_clz(number) - FloatProp::EXPONENT_WIDTH; + number <<= lz; + ep -= lz; + + if (likely(ep >= 0)) { + // Implicit number bit will be removed by mask + result.set_mantissa(number); + result.set_unbiased_exponent(ep + 1); + } else { + result.set_mantissa(number >> -ep); + } + return result; + } }; } // namespace fputil diff --git a/libc/src/__support/FPUtil/FloatProperties.h b/libc/src/__support/FPUtil/FloatProperties.h index cbef3908220cd0b257e79130756281e74ce77f7f..6fb96a5bd3dc853bd37de0f164db197884abb89b 100644 --- a/libc/src/__support/FPUtil/FloatProperties.h +++ b/libc/src/__support/FPUtil/FloatProperties.h @@ -10,6 +10,9 @@ #define LLVM_LIBC_SRC_SUPPORT_FPUTIL_FLOAT_PROPERTIES_H #include "PlatformDefs.h" + +#include "src/__support/CPP/UInt128.h" + #include <stdint.h> namespace __llvm_libc { @@ -104,7 +107,7 @@ template <> struct FloatProperties<double> { // Properties for numbers represented in 80 bits long double on non-Windows x86 // platforms. template <> struct FloatProperties<long double> { - typedef __uint128_t BitsType; + typedef UInt128 BitsType; static_assert(sizeof(BitsType) == sizeof(long double), "Unexpected size of 'long double' type."); @@ -140,7 +143,7 @@ template <> struct FloatProperties<long double> { // Properties for numbers represented in 128 bits long double on non x86 // platform. template <> struct FloatProperties<long double> { - typedef __uint128_t BitsType; + typedef UInt128 BitsType; static_assert(sizeof(BitsType) == sizeof(long double), "Unexpected size of 'long double' type."); diff --git a/libc/src/__support/FPUtil/Hypot.h b/libc/src/__support/FPUtil/Hypot.h index dc1056b22289c92a1d1c1681a5b11f893595f598..c4eb0abe6de75d30616cc1afac9742ba8f004822 100644 --- a/libc/src/__support/FPUtil/Hypot.h +++ b/libc/src/__support/FPUtil/Hypot.h @@ -15,6 +15,7 @@ #include "builtin_wrappers.h" #include "src/__support/CPP/Bit.h" #include "src/__support/CPP/TypeTraits.h" +#include "src/__support/CPP/UInt128.h" namespace __llvm_libc { namespace fputil { @@ -25,7 +26,7 @@ template <typename T> static inline T find_leading_one(T mant, int &shift_length) { shift_length = 0; if (mant > 0) { - shift_length = (sizeof(mant) * 8) - 1 - clz(mant); + shift_length = (sizeof(mant) * 8) - 1 - unsafe_clz(mant); } return T(1) << shift_length; } @@ -38,7 +39,9 @@ template <> struct DoubleLength<uint16_t> { using Type = uint32_t; }; template <> struct DoubleLength<uint32_t> { using Type = uint64_t; }; -template <> struct DoubleLength<uint64_t> { using Type = __uint128_t; }; +template <> struct DoubleLength<uint64_t> { + using Type = UInt128; +}; // Correctly rounded IEEE 754 HYPOT(x, y) with round to nearest, ties to even. // diff --git a/libc/src/__support/FPUtil/builtin_wrappers.h b/libc/src/__support/FPUtil/builtin_wrappers.h index 723299592d80851a0a1bdfda93fd6863f7be64f4..fc524d24d62384f1568d1b87718586a9f82b2694 100644 --- a/libc/src/__support/FPUtil/builtin_wrappers.h +++ b/libc/src/__support/FPUtil/builtin_wrappers.h @@ -17,6 +17,15 @@ namespace fputil { // __builtin_clz/ctz* rather than using the exactly-sized aliases from stdint.h. // This way, we can avoid making any assumptions about integer sizes and let the compiler match for us.
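The `safe_`/`unsafe_` split introduced just below exists because the GCC/Clang builtins `__builtin_clz`/`__builtin_ctz` are undefined when the argument is zero; `correct_zero` pins that case to the operand's bit width. A minimal standalone illustration of the guarded behavior (our sketch, not the patch's code):

```cpp
// Illustrative only: why a zero guard is needed around __builtin_clz.
#include <cassert>

static int safe_clz_demo(unsigned int val) {
  // __builtin_clz(0) is undefined behavior, so map 0 to "all 32 bits zero".
  return val == 0 ? int(sizeof(val) * 8) : __builtin_clz(val);
}

int main() {
  assert(safe_clz_demo(1u) == 31);         // 31 leading zeros before the 1
  assert(safe_clz_demo(0x80000000u) == 0); // top bit set: no leading zeros
  assert(safe_clz_demo(0u) == 32);         // the guarded case
  return 0;
}
```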
+namespace __internal { + +template <typename T> static inline int correct_zero(T val, int bits) { + if (val == T(0)) + return sizeof(T(0)) * 8; + else + return bits; +} + +template <typename T> static inline int clz(T val); template <> inline int clz<unsigned int>(unsigned int val) { return __builtin_clz(val); @@ -38,22 +47,23 @@ template <> inline int ctz<unsigned long int>(unsigned long int val) { return __builtin_ctzl(val); } template <> inline int ctz<unsigned long long int>(unsigned long long int val) { return __builtin_ctzll(val); } +} // namespace __internal -template <typename T> static inline bool isnan(T val) { - return __builtin_isnan(val); +template <typename T> static inline int safe_ctz(T val) { + return __internal::correct_zero(val, __internal::ctz(val)); } -template <typename T> static inline bool isinf(T val) { - return __builtin_isinf(val); +template <typename T> static inline int unsafe_ctz(T val) { + return __internal::ctz(val); } -template <typename T> static inline bool isfinite(T val) { - return __builtin_isfinite(val); +template <typename T> static inline int safe_clz(T val) { + return __internal::correct_zero(val, __internal::clz(val)); } -inline float quiet_NaN(float) { return __builtin_nanf(""); } -inline double quiet_NaN(double) { return __builtin_nan(""); } -inline long double quiet_NaN(long double) { return __builtin_nanl(""); } +template <typename T> static inline int unsafe_clz(T val) { + return __internal::clz(val); +} } // namespace fputil } // namespace __llvm_libc diff --git a/libc/src/__support/FPUtil/generic/CMakeLists.txt b/libc/src/__support/FPUtil/generic/CMakeLists.txt index a755e7670ce687207c75f226adb303fc12c69617..f19b887dac18cd118c6d0c07fffb82883bd453ef 100644 --- a/libc/src/__support/FPUtil/generic/CMakeLists.txt +++ b/libc/src/__support/FPUtil/generic/CMakeLists.txt @@ -3,10 +3,20 @@ add_header_library( HDRS sqrt.h sqrt_80_bit_long_double.h + DEPENDS + libc.src.__support.CPP.uint128 ) add_header_library( fma HDRS FMA.h + DEPENDS + libc.src.__support.CPP.uint128 +) + +add_header_library( + fmod + HDRS + FMod.h ) diff --git a/libc/src/__support/FPUtil/generic/FMA.h b/libc/src/__support/FPUtil/generic/FMA.h index 3dad2adf10df9466f449a2c5d41f185d9b97b9ce..b71f5bf64dc3de94cfb2abf06f191f52260cbc8c 100644 --- a/libc/src/__support/FPUtil/generic/FMA.h +++ b/libc/src/__support/FPUtil/generic/FMA.h @@ -11,6 +11,7 @@ #include "src/__support/CPP/Bit.h" #include "src/__support/CPP/TypeTraits.h" +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FEnvImpl.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/FPUtil/FloatProperties.h" @@ -78,12 +79,12 @@ namespace internal { // Extract the sticky bits and shift the `mantissa` to the right by // `shift_length`. -static inline bool shift_mantissa(int shift_length, __uint128_t &mant) { +static inline bool shift_mantissa(int shift_length, UInt128 &mant) { if (shift_length >= 128) { mant = 0; return true; // prod_mant is non-zero. } - __uint128_t mask = (__uint128_t(1) << shift_length) - 1; + UInt128 mask = (UInt128(1) << shift_length) - 1; bool sticky_bits = (mant & mask) != 0; mant >>= shift_length; return sticky_bits; @@ -131,9 +132,9 @@ template <> inline double fma<double>(double x, double y, double z) { return x * y + z; // Extract mantissa and append hidden leading bits. 
- __uint128_t x_mant = x_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; - __uint128_t y_mant = y_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; - __uint128_t z_mant = z_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; + UInt128 x_mant = x_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; + UInt128 y_mant = y_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; + UInt128 z_mant = z_bits.get_mantissa() | FPBits<double>::MIN_NORMAL; // If the exponent of the product x*y > the exponent of z, then no extra // precision beyond the entire product x*y is needed. On the other hand, when @@ -154,7 +155,7 @@ template <> inline double fma<double>(double x, double y, double z) { return x * y + z; // the original mantissa as high part when constructing 128-bit z_mant. So the // mantissa of prod will be left-shifted by 64 - 54 = 10 initially. - __uint128_t prod_mant = x_mant * y_mant << 10; + UInt128 prod_mant = x_mant * y_mant << 10; int prod_lsb_exp = x_exp + y_exp - (FPBits<double>::EXPONENT_BIAS + 2 * MantissaWidth<double>::VALUE + 10); @@ -206,8 +207,9 @@ template <> inline double fma<double>(double x, double y, double z) { return x * y + z; // Normalize the result. if (prod_mant != 0) { uint64_t prod_hi = static_cast<uint64_t>(prod_mant >> 64); - int lead_zeros = - prod_hi ? clz(prod_hi) : 64 + clz(static_cast<uint64_t>(prod_mant)); + int lead_zeros = prod_hi + ? unsafe_clz(prod_hi) + : 64 + unsafe_clz(static_cast<uint64_t>(prod_mant)); // Move the leading 1 to the most significant bit. prod_mant <<= lead_zeros; // The lower 64 bits are always sticky bits after moving the leading 1 to diff --git a/libc/src/__support/FPUtil/generic/FMod.h b/libc/src/__support/FPUtil/generic/FMod.h new file mode 100644 index 0000000000000000000000000000000000000000..d33acea8bafb1c2d6cac7562ef9d10013e3257e2 --- /dev/null +++ b/libc/src/__support/FPUtil/generic/FMod.h @@ -0,0 +1,320 @@ +//===-- Common header for fmod implementations ------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_SUPPORT_FPUTIL_GENERIC_FMOD_H +#define LLVM_LIBC_SRC_SUPPORT_FPUTIL_GENERIC_FMOD_H + +#include "src/__support/CPP/Limits.h" +#include "src/__support/CPP/TypeTraits.h" +#include "src/__support/FPUtil/FEnvImpl.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/builtin_wrappers.h" +#include "src/__support/common.h" +#include "src/math/generic/math_utils.h" + +namespace __llvm_libc { +namespace fputil { +namespace generic { + +// Overview: +// The algorithm uses integer arithmetic (max uint64_t) for the general case. +// Some common cases, like abs(x) < abs(y) or abs(x) < 1000 * abs(y), are +// treated specially to increase performance. Checking for special values +// (NaN, INF, etc.) is handled separately. +// +// Background: +// 1) FMod definition (https://cplusplus.com/reference/cmath/fmod/): +// fmod = numer - tquot * denom, where tquot is the truncated +// (i.e., rounded towards zero) result of: numer/denom. +// 2) FMod with negative x and/or y can be trivially converted to fmod for +// positive x and y. Therefore the algorithm below works only with +// positive numbers. +// 3) All positive floating point numbers can be represented as m * 2^e, +// where "m" is a positive integer and "e" is signed.
+// 4) FMod can be computed in integer arithmetic (for x > y): +// fmod = m_x * 2^e_x - tquot * m_y * 2^e_y +// = 2^e_y * (m_x * 2^(e_x - e_y) - tquot * m_y). +// All variables in parentheses are unsigned integers. +// +// Mathematical background: +// Input x, y to the algorithm are represented (mathematically) as m_x*2^e_x +// and m_y*2^e_y. This representation is ambiguous. For example: +// m * 2^e = (2 * m) * 2^(e-1) +// The algorithm uses the facts that +// r = a % b = (a % (N * b)) % b, +// (a * c) % (b * c) = (a % b) * c +// where N is a positive integer and a, b, c are positive. Let's apply +// these facts to the representation above. +// a = m_x * 2^e_x, b = m_y * 2^e_y, N = 2^k +// r(k) = a % b = (m_x * 2^e_x) % (2^k * m_y * 2^e_y) +// = 2^(e_y + k) * (m_x * 2^(e_x - e_y - k) % m_y) +// r(k) = m_r * 2^e_r = (m_x % m_y) * 2^(e_y + k) +// = (2^p * (m_x % m_y)) * 2^(e_y + k - p) +// m_r = 2^p * (m_x % m_y), e_r = e_y + k - p +// +// Algorithm description: +// First, let us write x = m_x * 2^e_x and y = m_y * 2^e_y, where m_x, m_y, +// e_x, e_y are integers (m_x and m_y positive). +// Then a naive implementation of the fmod function is a simple +// for/while loop: +// while (e_x > e_y) { +// m_x *= 2; --e_x; // m_x * 2^e_x == 2 * m_x * 2^(e_x - 1) +// m_x %= m_y; +// } +// On the other hand, the algorithm exploits the fact that m_x, m_y are the +// mantissas of floating point numbers, which use fewer bits than the storage +// integers: 24 / 32 for floats and 53 / 64 for doubles. So if in each step of +// the iteration we left shift m_x by as many bits as the storage integer +// type can hold, the exponent reduction per step will be at least 32 - 24 = 8 +// for floats and 64 - 53 = 11 for doubles (double example below): +// while (e_x > e_y) { +// m_x <<= 11; e_x -= 11; // m_x * 2^e_x == 2^11 * m_x * 2^(e_x - 11) +// m_x %= m_y; +// } +// Some extra improvements are made: +// 1) Shift m_y as far right as possible, which can significantly improve +// performance for small integer values of y (y = 3 for example). +// The m_x shift in the loop can then be 62 bits instead of 11 for double. +// 2) On architectures with very slow division, it can be better to +// calculate the inverse value once, and then use multiplication in the +// loop instead. +// 3) "Likely" special cases are treated specially to improve performance. +// +// Simple example: +// The example below uses a byte (8-bit) type for simplicity. +// 1) Shift m_y right as far as possible without losing bits, and increase +// e_y accordingly: m_y = 0b00101100, e_y = 20 becomes +// m_y = 0b00001011, e_y = 22. +// 2) m_x = m_x % m_y. +// 3) Shift m_x as far left as possible. Note that after (m_x = m_x % m_y) +// the CLZ of m_x is not lower than the CLZ of m_y: m_x = 0b00001001, +// e_x = 100 becomes m_x = 0b10010000, e_x = 100 - 4 = 96. +// 4) Repeat (2) until e_x == e_y. +// +// Complexity analysis (double): +// Converting x, y to (m_x, e_x), (m_y, e_y): CTZ/shift/AND/OR/if. Loop +// count: (e_x - e_y) / (64 - "length of m_y"), with +// max("length of m_y") = 53, +// max(e_x - e_y) = 2048. +// The maximum iteration count is therefore 186, and only for rare, +// "unrealistic" cases. +// +// Special cases (double): +// Since the case where |y| > 1e-292 and |x/y| < 2000 is very common, it is +// processed specially: no m_y alignment and no loop, just +// result = (m_x * 2^(e_x - e_y)) % m_y. +// When x and y are both subnormal (a rare case), the +// result = m_x % m_y. +// The conversion back to double is also simplified.
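The loop just described is easy to exercise in isolation. The following standalone sketch (our simplification, not the library implementation; it assumes positive, normal inputs and no special values) applies the 11-bit-per-step reduction for doubles and checks the result against std::fmod:

```cpp
// Illustrative only: the shift-and-modulo loop described above, specialized
// for positive, normal doubles.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Decompose a positive, normal double into m * 2^e, with the implicit
// leading bit made explicit in m (so m has up to 53 significant bits).
static void decompose(double x, uint64_t &m, int &e) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  m = (bits & 0xFFFFFFFFFFFFFull) | (1ull << 52);
  e = int((bits >> 52) & 0x7FF) - 1075; // unbias and scale so m is an integer
}

static double integer_fmod(double x, double y) {
  if (x < y)
    return x; // |x| < |y|: fmod is x itself
  uint64_t m_x, m_y;
  int e_x, e_y;
  decompose(x, m_x, e_x);
  decompose(y, m_y, e_y);
  // Reduce e_x toward e_y, shifting m_x left by up to 64 - 53 = 11 bits per
  // step, exactly as in the sketch above.
  while (e_x > e_y) {
    int shift = e_x - e_y < 11 ? e_x - e_y : 11;
    m_x <<= shift; // safe: m_x < 2^53 before the shift
    e_x -= shift;
    m_x %= m_y;
    if (m_x == 0)
      return 0.0;
  }
  m_x %= m_y;                          // handles the e_x == e_y case
  return std::ldexp(double(m_x), e_x); // m_x * 2^e_x, exact since m_x < 2^53
}

int main() {
  assert(integer_fmod(5.0, 3.0) == std::fmod(5.0, 3.0)); // 2.0
  assert(integer_fmod(1.0e10, 7.25) == std::fmod(1.0e10, 7.25));
  assert(integer_fmod(123.0, 123.0) == 0.0);
  return 0;
}
```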
+ +// Exceptional cases handler according to cppreference.com +// https://en.cppreference.com/w/cpp/numeric/math/fmod +// and the POSIX standard described in the Linux man pages +// https://man7.org/linux/man-pages/man3/fmod.3p.html +// The C standard description of the function is not complete, so it is not +// handled by default (although it can be implemented in another handler). +// A signaling NaN is converted to a quiet NaN, raising the FE_INVALID +// exception. +// https://www.open-std.org/JTC1/SC22/WG14/www/docs/n1011.htm +template <typename T> struct FModExceptionalInputHandler { + + static_assert(cpp::IsFloatingPointType<T>::Value, + "FModExceptionalInputHandler instantiated with invalid type."); + + static bool PreCheck(T x, T y, T &out) { + using FPB = fputil::FPBits<T>; + const T quiet_NaN = FPB::build_nan(FPB::FloatProp::QUIET_NAN_MASK); + FPB sx(x), sy(y); + if (likely(!sy.is_zero() && !sy.is_inf_or_nan() && !sx.is_inf_or_nan())) { + return false; + } + + if (sx.is_nan() || sy.is_nan()) { + if ((sx.is_nan() && !sx.is_quiet_nan()) || + (sy.is_nan() && !sy.is_quiet_nan())) + fputil::set_except(FE_INVALID); + out = quiet_NaN; + return true; + } + + if (sx.is_inf() || sy.is_zero()) { + fputil::set_except(FE_INVALID); + out = with_errno(quiet_NaN, EDOM); + return true; + } + + if (sy.is_inf()) { + out = x; + return true; + } + + // The remaining case: x == 0. + out = x; + return true; + } +}; + +template <typename T> struct FModFastMathWrapper { + + static_assert(cpp::IsFloatingPointType<T>::Value, + "FModFastMathWrapper instantiated with invalid type."); + + static bool PreCheck(T, T, T &) { return false; } +}; + +template <typename T> class FModDivisionSimpleHelper { +private: + using intU_t = typename FPBits<T>::UIntType; + +public: + inline constexpr static intU_t execute(int exp_diff, int sides_zeroes_count, + intU_t m_x, intU_t m_y) { + while (exp_diff > sides_zeroes_count) { + exp_diff -= sides_zeroes_count; + m_x <<= sides_zeroes_count; + m_x %= m_y; + } + m_x <<= exp_diff; + m_x %= m_y; + return m_x; + } +}; + +template <typename T> class FModDivisionInvMultHelper { +private: + using FPB = FPBits<T>; + using intU_t = typename FPB::UIntType; + +public: + inline constexpr static intU_t execute(int exp_diff, int sides_zeroes_count, + intU_t m_x, intU_t m_y) { + if (exp_diff > sides_zeroes_count) { + intU_t inv_hy = (cpp::NumericLimits<intU_t>::max() / m_y); + while (exp_diff > sides_zeroes_count) { + exp_diff -= sides_zeroes_count; + intU_t hd = + (m_x * inv_hy) >> (FPB::FloatProp::BIT_WIDTH - sides_zeroes_count); + m_x <<= sides_zeroes_count; + m_x -= hd * m_y; + while (unlikely(m_x > m_y)) + m_x -= m_y; + } + intU_t hd = (m_x * inv_hy) >> (FPB::FloatProp::BIT_WIDTH - exp_diff); + m_x <<= exp_diff; + m_x -= hd * m_y; + while (unlikely(m_x > m_y)) + m_x -= m_y; + } else { + m_x <<= exp_diff; + m_x %= m_y; + } + return m_x; + } +}; + +template <typename T, class Wrapper = FModExceptionalInputHandler<T>, + class DivisionHelper = FModDivisionSimpleHelper<T>> +class FMod { + static_assert(cpp::IsFloatingPointType<T>::Value, + "FMod instantiated with invalid type."); + +private: + using FPB = FPBits<T>; + using intU_t = typename FPB::UIntType; + + inline static constexpr FPB eval_internal(FPB sx, FPB sy) { + + if (likely(sx.uintval() <= sy.uintval())) { + if (sx.uintval() < sy.uintval()) + return sx; // |x|<|y| return x + return FPB::zero(); // |x|=|y| return 0.0 + } + + int e_x = sx.get_unbiased_exponent(); + int e_y = sy.get_unbiased_exponent(); + + // Most common case, where |y| is "very normal" and |x/y| < 2^EXPONENT_WIDTH + if (likely(e_y > int(FPB::FloatProp::MANTISSA_WIDTH) && + e_x - e_y <= int(FPB::FloatProp::EXPONENT_WIDTH))) { + intU_t m_x = sx.get_explicit_mantissa(); + intU_t m_y =
sy.get_explicit_mantissa(); + intU_t d = (e_x == e_y) ? (m_x - m_y) : (m_x << (e_x - e_y)) % m_y; + if (d == 0) + return FPB::zero(); + // Pass e_y - 1 because make_value adds +1 to the exponent. + return FPB::make_value(d, e_y - 1); + } + /* Special case where both x and y are subnormal. */ + if (unlikely(e_x == 0 && e_y == 0)) { + FPB d; + d.set_mantissa(sx.uintval() % sy.uintval()); + return d; + } + + // Note that m_x is not subnormal by the conditions above. + intU_t m_x = sx.get_explicit_mantissa(); + e_x--; + + intU_t m_y = sy.get_explicit_mantissa(); + int lead_zeros_m_y = FPB::FloatProp::EXPONENT_WIDTH; + if (likely(e_y > 0)) { + e_y--; + } else { + m_y = sy.get_mantissa(); + lead_zeros_m_y = unsafe_clz(m_y); + } + + // Assume m_y != 0 (y == 0 was filtered out by PreCheck). + int tail_zeros_m_y = unsafe_ctz(m_y); + int sides_zeroes_count = lead_zeros_m_y + tail_zeros_m_y; + // exp_diff > 0 by the conditions above. + int exp_diff = e_x - e_y; + { + // Shift m_y right as far as possible, or until exp_diff reaches 0. + int right_shift = exp_diff < tail_zeros_m_y ? exp_diff : tail_zeros_m_y; + m_y >>= right_shift; + exp_diff -= right_shift; + e_y += right_shift; + } + + { + // Shift m_x left as far as possible, or until exp_diff reaches 0. + int left_shift = exp_diff < int(FPB::FloatProp::EXPONENT_WIDTH) + ? exp_diff + : FPB::FloatProp::EXPONENT_WIDTH; + m_x <<= left_shift; + exp_diff -= left_shift; + } + + m_x %= m_y; + if (unlikely(m_x == 0)) + return FPB::zero(); + + if (exp_diff == 0) + return FPB::make_value(m_x, e_y); + + /* From here, m_x cannot become 0: m_x < m_y and m_y is odd, so + m_x * 2^i % m_y != 0. */ + m_x = DivisionHelper::execute(exp_diff, sides_zeroes_count, m_x, m_y); + return FPB::make_value(m_x, e_y); + } + +public: + static inline T eval(T x, T y) { + if (T out; Wrapper::PreCheck(x, y, out)) + return out; + FPB sx(x), sy(y); + bool sign = sx.get_sign(); + sx.set_sign(false); + sy.set_sign(false); + FPB result = eval_internal(sx, sy); + result.set_sign(sign); + return result.get_val(); + } +}; + +} // namespace generic +} // namespace fputil +} // namespace __llvm_libc + +#endif // LLVM_LIBC_SRC_SUPPORT_FPUTIL_GENERIC_FMOD_H diff --git a/libc/src/__support/FPUtil/generic/sqrt.h b/libc/src/__support/FPUtil/generic/sqrt.h index cd3a5ec48c82c867d88928ea090cc636f7d431d2..5f5d9bb00cde7565a63478945570dc8270574300 100644 --- a/libc/src/__support/FPUtil/generic/sqrt.h +++ b/libc/src/__support/FPUtil/generic/sqrt.h @@ -12,6 +12,7 @@ #include "sqrt_80_bit_long_double.h" #include "src/__support/CPP/Bit.h" #include "src/__support/CPP/TypeTraits.h" +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FEnvImpl.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/FPUtil/PlatformDefs.h" @@ -35,8 +36,8 @@ template <> struct SpecialLongDouble<long double> { template <typename T> static inline void normalize(int &exponent, typename FPBits<T>::UIntType &mantissa) { - const int shift = - clz(mantissa) - (8 * sizeof(mantissa) - 1 - MantissaWidth<T>::VALUE); + const int shift = unsafe_clz(mantissa) - + (8 * sizeof(mantissa) - 1 - MantissaWidth<T>::VALUE); exponent -= shift; mantissa <<= shift; } @@ -48,10 +49,11 @@ inline void normalize<double>(int &exponent, uint64_t &mantissa) { } #elif !defined(SPECIAL_X86_LONG_DOUBLE) template <> -inline void normalize<long double>(int &exponent, __uint128_t &mantissa) { +inline void normalize<long double>(int &exponent, UInt128 &mantissa) { const uint64_t hi_bits = static_cast<uint64_t>(mantissa >> 64); - const int shift = hi_bits ? (clz(hi_bits) - 15) - : (clz(static_cast<uint64_t>(mantissa)) + 49); + const int shift = hi_bits + ? 
(unsafe_clz(hi_bits) - 15) + : (unsafe_clz(static_cast<uint64_t>(mantissa)) + 49); exponent -= shift; mantissa <<= shift; } diff --git a/libc/src/__support/FPUtil/generic/sqrt_80_bit_long_double.h b/libc/src/__support/FPUtil/generic/sqrt_80_bit_long_double.h index 0fa720b1ae8ae83a2c5bdcce6f3ddd7a21883202..c460b42ae9584a8149f5a201a484c35a3f2bb7d8 100644 --- a/libc/src/__support/FPUtil/generic/sqrt_80_bit_long_double.h +++ b/libc/src/__support/FPUtil/generic/sqrt_80_bit_long_double.h @@ -9,6 +9,7 @@ #ifndef LLVM_LIBC_SRC_SUPPORT_FPUTIL_GENERIC_SQRT_80_BIT_LONG_DOUBLE_H #define LLVM_LIBC_SRC_SUPPORT_FPUTIL_GENERIC_SQRT_80_BIT_LONG_DOUBLE_H +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FEnvImpl.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/FPUtil/PlatformDefs.h" @@ -18,9 +19,9 @@ namespace __llvm_libc { namespace fputil { namespace x86 { -inline void normalize(int &exponent, __uint128_t &mantissa) { +inline void normalize(int &exponent, UInt128 &mantissa) { const int shift = - clz(static_cast<uint64_t>(mantissa)) - + unsafe_clz(static_cast<uint64_t>(mantissa)) - (8 * sizeof(uint64_t) - 1 - MantissaWidth<long double>::VALUE); exponent -= shift; mantissa <<= shift; diff --git a/libc/src/__support/FPUtil/x86_64/LongDoubleBits.h b/libc/src/__support/FPUtil/x86_64/LongDoubleBits.h index 0a2dc452fc73d2c0cd88c20bfd4c04aca025624a..468176b27ee4facd7858e85debe9dd94d48e158a 100644 --- a/libc/src/__support/FPUtil/x86_64/LongDoubleBits.h +++ b/libc/src/__support/FPUtil/x86_64/LongDoubleBits.h @@ -10,6 +10,7 @@ #define LLVM_LIBC_SRC_SUPPORT_FPUTIL_X86_64_LONG_DOUBLE_BITS_H #include "src/__support/CPP/Bit.h" +#include "src/__support/CPP/UInt128.h" #include "src/__support/architectures.h" #if !defined(LLVM_LIBC_ARCH_X86) @@ -32,7 +33,7 @@ template <> struct Padding<4> { static constexpr unsigned VALUE = 16; }; template <> struct Padding<8> { static constexpr unsigned VALUE = 48; }; template <> struct FPBits<long double> { - using UIntType = __uint128_t; + using UIntType = UInt128; static constexpr int EXPONENT_BIAS = 0x3FFF; static constexpr int MAX_EXPONENT = 0x7FFF; diff --git a/libc/src/__support/str_to_float.h b/libc/src/__support/str_to_float.h index 30a1f0eb4a9bc463938661237e1178791af5c490..1fff9373be2b3cc7ac5cba407fdd5d0759923620 100644 --- a/libc/src/__support/str_to_float.h +++ b/libc/src/__support/str_to_float.h @@ -10,6 +10,7 @@ #define LIBC_SRC_SUPPORT_STR_TO_FLOAT_H #include "src/__support/CPP/Limits.h" +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/FPUtil/builtin_wrappers.h" #include "src/__support/ctype_utils.h" @@ -51,18 +52,18 @@ template <typename T> uint32_t inline leading_zeroes(T inputNumber) { } template <> uint32_t inline leading_zeroes<uint32_t>(uint32_t inputNumber) { - return inputNumber == 0 ? 32 : fputil::clz(inputNumber); + return fputil::safe_clz(inputNumber); } template <> uint32_t inline leading_zeroes<uint64_t>(uint64_t inputNumber) { - return inputNumber == 0 ? 
64 : fputil::clz(inputNumber); + return fputil::safe_clz(inputNumber); } -static inline uint64_t low64(__uint128_t num) { +static inline uint64_t low64(const UInt128 &num) { return static_cast(num & 0xffffffffffffffff); } -static inline uint64_t high64(__uint128_t num) { +static inline uint64_t high64(const UInt128 &num) { return static_cast(num >> 64); } @@ -116,11 +117,11 @@ eisel_lemire(typename fputil::FPBits::UIntType mantissa, int32_t exp10, const uint64_t *power_of_ten = DETAILED_POWERS_OF_TEN[exp10 - DETAILED_POWERS_OF_TEN_MIN_EXP_10]; - __uint128_t first_approx = static_cast<__uint128_t>(mantissa) * - static_cast<__uint128_t>(power_of_ten[1]); + UInt128 first_approx = + static_cast(mantissa) * static_cast(power_of_ten[1]); // Wider Approximation - __uint128_t final_approx; + UInt128 final_approx; // The halfway constant is used to check if the bits that will be shifted away // intially are all 1. For doubles this is 64 (bitstype size) - 52 (final // mantissa size) - 3 (we shift away the last two bits separately for @@ -132,10 +133,10 @@ eisel_lemire(typename fputil::FPBits::UIntType mantissa, int32_t exp10, 1; if ((high64(first_approx) & halfway_constant) == halfway_constant && low64(first_approx) + mantissa < mantissa) { - __uint128_t low_bits = static_cast<__uint128_t>(mantissa) * - static_cast<__uint128_t>(power_of_ten[0]); - __uint128_t second_approx = - first_approx + static_cast<__uint128_t>(high64(low_bits)); + UInt128 low_bits = + static_cast(mantissa) * static_cast(power_of_ten[0]); + UInt128 second_approx = + first_approx + static_cast(high64(low_bits)); if ((high64(second_approx) & halfway_constant) == halfway_constant && low64(second_approx) + 1 == 0 && @@ -220,31 +221,31 @@ inline bool eisel_lemire( // full 128 bits of the power of ten to get an approximation with the same // number of significant bits. This means that we only get the one // approximation, and that approximation is 256 bits long. - __uint128_t approx_upper = static_cast<__uint128_t>(high64(mantissa)) * - static_cast<__uint128_t>(power_of_ten[1]); + UInt128 approx_upper = static_cast(high64(mantissa)) * + static_cast(power_of_ten[1]); - __uint128_t approx_middle = static_cast<__uint128_t>(high64(mantissa)) * - static_cast<__uint128_t>(power_of_ten[0]) + - static_cast<__uint128_t>(low64(mantissa)) * - static_cast<__uint128_t>(power_of_ten[1]); + UInt128 approx_middle = static_cast(high64(mantissa)) * + static_cast(power_of_ten[0]) + + static_cast(low64(mantissa)) * + static_cast(power_of_ten[1]); - __uint128_t approx_lower = static_cast<__uint128_t>(low64(mantissa)) * - static_cast<__uint128_t>(power_of_ten[0]); + UInt128 approx_lower = static_cast(low64(mantissa)) * + static_cast(power_of_ten[0]); - __uint128_t final_approx_lower = - approx_lower + (static_cast<__uint128_t>(low64(approx_middle)) << 64); - __uint128_t final_approx_upper = approx_upper + high64(approx_middle) + - (final_approx_lower < approx_lower ? 1 : 0); + UInt128 final_approx_lower = + approx_lower + (static_cast(low64(approx_middle)) << 64); + UInt128 final_approx_upper = approx_upper + high64(approx_middle) + + (final_approx_lower < approx_lower ? 1 : 0); // The halfway constant is used to check if the bits that will be shifted away // intially are all 1. For 80 bit floats this is 128 (bitstype size) - 64 // (final mantissa size) - 3 (we shift away the last two bits separately for // accuracy, and the most significant bit is ignored.) = 61 bits. Similarly, // it's 12 bits for 128 bit floats in this case. 
-  constexpr __uint128_t HALFWAY_CONSTANT =
-      (__uint128_t(1) << (BITS_IN_MANTISSA -
-                          fputil::FloatProperties<long double>::MANTISSA_WIDTH -
-                          3)) -
+  constexpr UInt128 HALFWAY_CONSTANT =
+      (UInt128(1) << (BITS_IN_MANTISSA -
+                      fputil::FloatProperties<long double>::MANTISSA_WIDTH -
+                      3)) -
       1;
 
   if ((final_approx_upper & HALFWAY_CONSTANT) == HALFWAY_CONSTANT &&
diff --git a/libc/src/__support/threads/CMakeLists.txt b/libc/src/__support/threads/CMakeLists.txt
index 5dce96cfc2ab2fb0c7000531dfb7408840116418..04f028799b323bea494057bf40d11ef3571779ce 100644
--- a/libc/src/__support/threads/CMakeLists.txt
+++ b/libc/src/__support/threads/CMakeLists.txt
@@ -28,11 +28,17 @@ if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.mutex)
 endif()
 
 if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.thread)
-  add_header_library(
+  add_object_library(
     thread
     HDRS
       thread.h
+    SRCS
+      ${LIBC_TARGET_OS}/thread.cpp
     DEPENDS
-      .${LIBC_TARGET_OS}.thread
+      .thread_attrib
+    COMPILE_OPTIONS
+      -O3
+      -fno-omit-frame-pointer # This allows us to sniff out the thread args from
+                              # the new thread's stack reliably.
   )
 endif()
diff --git a/libc/src/__support/threads/linux/thread.cpp b/libc/src/__support/threads/linux/thread.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fd0a3f69a7bd5e9a05891e3a58c1aa1b8ccbae3c
--- /dev/null
+++ b/libc/src/__support/threads/linux/thread.cpp
@@ -0,0 +1,260 @@
+//===--- Implementation of a Linux thread class -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/threads/thread.h"
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/CPP/error.h"
+#include "src/__support/OSUtil/syscall.h"           // For syscall functions.
+#include "src/__support/threads/linux/futex_word.h" // For FutexWordType
+
+#ifdef LLVM_LIBC_ARCH_AARCH64
+#include <arm_acle.h>
+#endif
+
+#include <linux/futex.h>
+#include <linux/sched.h> // For CLONE_* flags.
+#include <stdint.h>
+#include <sys/mman.h>    // For PROT_* and MAP_* definitions.
+#include <sys/syscall.h> // For syscall numbers.
+
+namespace __llvm_libc {
+
+#ifdef SYS_mmap2
+static constexpr long MMAP_SYSCALL_NUMBER = SYS_mmap2;
+#elif SYS_mmap
+static constexpr long MMAP_SYSCALL_NUMBER = SYS_mmap;
+#else
+#error "SYS_mmap or SYS_mmap2 not available on the target platform"
+#endif
+
+static constexpr size_t DEFAULT_STACK_SIZE = (1 << 16); // 64KB
+static constexpr uint32_t CLEAR_TID_VALUE = 0xABCD1234;
+static constexpr unsigned CLONE_SYSCALL_FLAGS =
+    CLONE_VM        // Share the memory space with the parent.
+    | CLONE_FS      // Share the file system with the parent.
+    | CLONE_FILES   // Share the files with the parent.
+    | CLONE_SIGHAND // Share the signal handlers with the parent.
+    | CLONE_THREAD  // Same thread group as the parent.
+    | CLONE_SYSVSEM // Share a single list of System V semaphore adjustment
+                    // values
+    | CLONE_PARENT_SETTID   // Set child thread ID in |ptid| of the parent.
+    | CLONE_CHILD_CLEARTID; // Let the kernel clear the tid address and
+                            // wake the joining thread.
+// TODO: Add the CLONE_SETTLS flag and setup the TLS area correctly
+// when making the clone syscall.
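
For orientation before the implementation details: the public surface this file implements is the non-template `Thread` class declared in `src/__support/threads/thread.h` further down in this diff. A hedged usage sketch, assuming only the `run`/`join` overloads declared there (passing `nullptr`/`0` lets the implementation mmap a default-sized stack that it owns):

```cpp
#include "src/__support/threads/thread.h"

// A stdc-style runner has the signature int(void *).
static int add_one(void *arg) { return *reinterpret_cast<int *>(arg) + 1; }

int demo() {
  int arg = 41;
  __llvm_libc::Thread th;
  // nullptr stack + zero size => a DEFAULT_STACK_SIZE stack is mmapped,
  // owned, and later freed by the thread implementation.
  if (int err = th.run(add_one, &arg, nullptr, 0))
    return err; // Error value from the failed mmap/clone.
  int retval;
  th.join(&retval); // The stdc-style runner pairs with join(int *).
  return retval;    // 42
}
```
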
+
+static inline cpp::ErrorOr<void *> alloc_stack(size_t size) {
+  long mmap_result =
+      __llvm_libc::syscall(MMAP_SYSCALL_NUMBER,
+                           0, // No special address
+                           size,
+                           PROT_READ | PROT_WRITE,      // Read and write stack
+                           MAP_ANONYMOUS | MAP_PRIVATE, // Process private
+                           -1, // Not backed by any file
+                           0   // No offset
+      );
+  if (mmap_result < 0 && (uintptr_t(mmap_result) >= UINTPTR_MAX - size))
+    return cpp::Error{int(-mmap_result)};
+  return reinterpret_cast<void *>(mmap_result);
+}
+
+static inline void free_stack(void *stack, size_t size) {
+  __llvm_libc::syscall(SYS_munmap, stack, size);
+}
+
+struct Thread;
+
+// We align the start args to 16-byte boundary as we adjust the allocated
+// stack memory with its size. We want the adjusted address to be at a
+// 16-byte boundary to satisfy the x86_64 and aarch64 ABI requirements.
+// If different architecture in future requires higher alignment, then we
+// can add a platform specific alignment spec.
+struct alignas(STACK_ALIGNMENT) StartArgs {
+  Thread *thread;
+  ThreadRunner runner;
+  void *arg;
+};
+
+__attribute__((always_inline)) inline uintptr_t get_start_args_addr() {
+// NOTE: For __builtin_frame_address to work reliably across compilers,
+// architectures and various optimization levels, the TU including this file
+// should be compiled with -fno-omit-frame-pointer.
+#ifdef LLVM_LIBC_ARCH_X86_64
+  return reinterpret_cast<uintptr_t>(__builtin_frame_address(0))
+         // The x86_64 call instruction pushes resume address on to the stack.
+         // Next, the x86_64 SysV ABI requires that the frame pointer be pushed
+         // on to the stack. So, we have to step past two 64-bit values to get
+         // to the start args.
+         + sizeof(uintptr_t) * 2;
+#elif defined(LLVM_LIBC_ARCH_AARCH64)
+  // The frame pointer after cloning the new thread in the Thread::run method
+  // is set to the stack pointer where start args are stored. So, we fetch
+  // from there.
+  return reinterpret_cast<uintptr_t>(__builtin_frame_address(1));
+#endif
+}
+
+static void start_thread() __attribute__((noinline)) {
+  auto *start_args = reinterpret_cast<StartArgs *>(get_start_args_addr());
+  auto *thread = start_args->thread;
+  auto *attrib = thread->attrib;
+  long retval;
+  if (attrib->style == ThreadStyle::POSIX) {
+    attrib->retval.posix_retval =
+        start_args->runner.posix_runner(start_args->arg);
+    retval = long(attrib->retval.posix_retval);
+  } else {
+    attrib->retval.stdc_retval =
+        start_args->runner.stdc_runner(start_args->arg);
+    retval = long(attrib->retval.stdc_retval);
+  }
+
+  uint32_t joinable_state = uint32_t(DetachState::JOINABLE);
+  if (!thread->attrib->detach_state.compare_exchange_strong(
+          joinable_state, uint32_t(DetachState::EXITING))) {
+    // Thread is detached so cleanup the resources.
+    if (thread->attrib->owned_stack)
+      free_stack(thread->attrib->stack, thread->attrib->stack_size);
+  }
+
+  __llvm_libc::syscall(SYS_exit, retval);
+}
+
+int Thread::run(ThreadStyle style, ThreadRunner runner, void *arg, void *stack,
+                size_t size, bool detached) {
+  bool owned_stack = false;
+  if (stack == nullptr) {
+    if (size == 0)
+      size = DEFAULT_STACK_SIZE;
+    auto alloc = alloc_stack(size);
+    if (!alloc)
+      return alloc.error_code();
+    else
+      stack = alloc.value();
+    owned_stack = true;
+  }
+
+  // When the new thread is spawned by the kernel, the new thread gets the
+  // stack we pass to the clone syscall. However, this stack is empty and does
+  // not have any local vars present in this function. Hence, one cannot
+  // pass arguments to the thread start function, or use any local vars from
+  // here. So, we pack them into the new stack from where the thread can sniff
+  // them out.
+  //
+  // Likewise, the actual thread state information is also stored on the
+  // stack memory.
+  uintptr_t adjusted_stack = reinterpret_cast<uintptr_t>(stack) + size -
+                             sizeof(StartArgs) - sizeof(ThreadAttributes) -
+                             sizeof(cpp::Atomic<FutexWordType>);
+  adjusted_stack &= ~(uintptr_t(STACK_ALIGNMENT) - 1);
+
+  auto *start_args = reinterpret_cast<StartArgs *>(adjusted_stack);
+  start_args->thread = this;
+  start_args->runner = runner;
+  start_args->arg = arg;
+
+  attrib =
+      reinterpret_cast<ThreadAttributes *>(adjusted_stack + sizeof(StartArgs));
+  attrib->style = style;
+  attrib->detach_state =
+      uint32_t(detached ? DetachState::DETACHED : DetachState::JOINABLE);
+  attrib->stack = stack;
+  attrib->stack_size = size;
+  attrib->owned_stack = owned_stack;
+
+  auto clear_tid = reinterpret_cast<cpp::Atomic<FutexWordType> *>(
+      adjusted_stack + sizeof(StartArgs) + sizeof(ThreadAttributes));
+  clear_tid->val = CLEAR_TID_VALUE;
+  platform_data = clear_tid;
+
+  // The clone syscall takes arguments in an architecture specific order.
+  // Also, we want the result of the syscall to be in a register as the child
+  // thread gets a completely different stack after it is created. The stack
+  // variables from this function will not be available to the child thread.
+#ifdef LLVM_LIBC_ARCH_X86_64
+  long register clone_result asm("rax");
+  clone_result = __llvm_libc::syscall(
+      SYS_clone, CLONE_SYSCALL_FLAGS, adjusted_stack,
+      &attrib->tid,    // The address where the child tid is written
+      &clear_tid->val, // The futex where the child thread status is signalled
+      0                // Set TLS to null for now.
+  );
+#elif defined(LLVM_LIBC_ARCH_AARCH64)
+  long register clone_result asm("x0");
+  clone_result = __llvm_libc::syscall(
+      SYS_clone, CLONE_SYSCALL_FLAGS, adjusted_stack,
+      &attrib->tid,   // The address where the child tid is written
+      0,              // Set TLS to null for now.
+      &clear_tid->val // The futex where the child thread status is signalled
+  );
+#else
+#error "Unsupported architecture for the clone syscall."
+#endif
+
+  if (clone_result == 0) {
+#ifdef LLVM_LIBC_ARCH_AARCH64
+    // We set the frame pointer to be the same as the "sp" so that start args
+    // can be sniffed out from start_thread.
+    __arm_wsr64("x29", __arm_rsr64("sp"));
+#endif
+    start_thread();
+  } else if (clone_result < 0) {
+    if (attrib->owned_stack)
+      free_stack(attrib->stack, attrib->stack_size);
+    return -clone_result;
+  }
+
+  return 0;
+}
+
+int Thread::join(ThreadReturnValue &retval) {
+  wait();
+
+  if (attrib->style == ThreadStyle::POSIX)
+    retval.posix_retval = attrib->retval.posix_retval;
+  else
+    retval.stdc_retval = attrib->retval.stdc_retval;
+
+  if (attrib->owned_stack)
+    free_stack(attrib->stack, attrib->stack_size);
+
+  return 0;
+}
+
+int Thread::detach() {
+  uint32_t joinable_state = uint32_t(DetachState::JOINABLE);
+  if (attrib->detach_state.compare_exchange_strong(
+          joinable_state, uint32_t(DetachState::DETACHED))) {
+    return int(DetachType::SIMPLE);
+  }
+
+  // If the thread was already detached, then the detach method should not
+  // be called at all. If the thread is exiting, then we wait for it to exit
+  // and free up resources.
+  wait();
+
+  if (attrib->owned_stack)
+    free_stack(attrib->stack, attrib->stack_size);
+  return int(DetachType::CLEANUP);
+}
+
+void Thread::wait() {
+  // The kernel should set the value at the clear tid address to zero.
+  // If not, it is a spurious wake and we should continue to wait on
+  // the futex.
+  auto *clear_tid =
+      reinterpret_cast<cpp::Atomic<FutexWordType> *>(platform_data);
+  while (clear_tid->load() != 0) {
+    // We cannot do a FUTEX_WAIT_PRIVATE here as the kernel does a
+    // FUTEX_WAKE and not a FUTEX_WAKE_PRIVATE.
+    __llvm_libc::syscall(SYS_futex, &clear_tid->val, FUTEX_WAIT,
+                         CLEAR_TID_VALUE, nullptr);
+  }
+}
+
+} // namespace __llvm_libc
diff --git a/libc/src/__support/threads/linux/thread.h b/libc/src/__support/threads/linux/thread.h
deleted file mode 100644
index a947613ea5e271de79212fe35e8b867d8f0e6a94..0000000000000000000000000000000000000000
--- a/libc/src/__support/threads/linux/thread.h
+++ /dev/null
@@ -1,282 +0,0 @@
-//===--- Implementation of a Linux thread class -----------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_THREAD_H
-#define LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_THREAD_H
-
-#include "src/__support/CPP/atomic.h"
-#include "src/__support/CPP/error.h"
-#include "src/__support/OSUtil/syscall.h"           // For syscall functions.
-#include "src/__support/threads/linux/futex_word.h" // For FutexWordType
-#include "src/__support/threads/thread_attrib.h"
-
-#ifdef LLVM_LIBC_ARCH_AARCH64
-#include <arm_acle.h>
-#endif
-
-#include <linux/futex.h>
-#include <linux/sched.h> // For CLONE_* flags.
-#include <stdint.h>
-#include <sys/mman.h>    // For PROT_* and MAP_* definitions.
-#include <sys/syscall.h> // For syscall numbers.
-
-namespace __llvm_libc {
-
-template <typename ReturnType> struct Thread;
-
-#ifdef SYS_mmap2
-static constexpr long MMAP_SYSCALL_NUMBER = SYS_mmap2;
-#elif SYS_mmap
-static constexpr long MMAP_SYSCALL_NUMBER = SYS_mmap;
-#else
-#error "SYS_mmap or SYS_mmap2 not available on the target platform"
-#endif
-
-static constexpr size_t DEFAULT_STACK_SIZE = (1 << 16); // 64KB
-static constexpr uint32_t CLEAR_TID_VALUE = 0xABCD1234;
-static constexpr unsigned CLONE_SYSCALL_FLAGS =
-    CLONE_VM        // Share the memory space with the parent.
-    | CLONE_FS      // Share the file system with the parent.
-    | CLONE_FILES   // Share the files with the parent.
-    | CLONE_SIGHAND // Share the signal handlers with the parent.
-    | CLONE_THREAD  // Same thread group as the parent.
-    | CLONE_SYSVSEM // Share a single list of System V semaphore adjustment
-                    // values
-    | CLONE_PARENT_SETTID   // Set child thread ID in |ptid| of the parent.
-    | CLONE_CHILD_CLEARTID; // Let the kernel clear the tid address
-                            // wake the joining thread.
-// TODO: Add the CLONE_SETTLS flag and setup the TLS area correctly
-// when making the clone syscall.
-
-static inline cpp::ErrorOr<void *> alloc_stack(size_t size) {
-  long mmap_result =
-      __llvm_libc::syscall(MMAP_SYSCALL_NUMBER,
-                           0, // No special address
-                           size,
-                           PROT_READ | PROT_WRITE,      // Read and write stack
-                           MAP_ANONYMOUS | MAP_PRIVATE, // Process private
-                           -1, // Not backed by any file
-                           0   // No offset
-      );
-  if (mmap_result < 0 && (uintptr_t(mmap_result) >= UINTPTR_MAX - size))
-    return cpp::Error{int(-mmap_result)};
-  return reinterpret_cast<void *>(mmap_result);
-}
-
-static inline void free_stack(void *stack, size_t size) {
-  __llvm_libc::syscall(SYS_munmap, stack, size);
-}
-
-template <typename ReturnType> using ThreadRunner = ReturnType(void *);
-
-// We align the start args to 16-byte boundary as we adjust the allocated
-// stack memory with its size. We want the adjusted address to be at a
-// 16-byte boundary to satisfy the x86_64 and aarch64 ABI requirements.
-// If different architecture in future requires higher alignment, then we
-// can add a platform specific alignment spec.
-template <typename ReturnType> struct alignas(STACK_ALIGNMENT) StartArgs {
-  Thread<ReturnType> *thread;
-  ThreadRunner<ReturnType> *func;
-  void *arg;
-};
-
-__attribute__((always_inline)) inline uintptr_t get_start_args_addr() {
-  // NOTE: For __builtin_frame_address to work reliably across compilers,
-  // architectures and various optimization levels, the TU including this file
-  // should be compiled with -fno-omit-frame-pointer.
-#ifdef LLVM_LIBC_ARCH_X86_64
-  return reinterpret_cast<uintptr_t>(__builtin_frame_address(0))
-         // The x86_64 call instruction pushes resume address on to the stack.
-         // Next, The x86_64 SysV ABI requires that the frame pointer be pushed
-         // on to the stack. So, we have to step past two 64-bit values to get
-         // to the start args.
-         + sizeof(uintptr_t) * 2;
-#elif defined(LLVM_LIBC_ARCH_AARCH64)
-  // The frame pointer after cloning the new thread in the Thread::run method
-  // is set to the stack pointer where start args are stored. So, we fetch
-  // from there.
-  return reinterpret_cast<uintptr_t>(__builtin_frame_address(1));
-#endif
-}
-
-template <typename ReturnType> struct Thread {
-private:
-  ThreadAttributes<ReturnType> *attrib;
-  cpp::Atomic<FutexWordType> *clear_tid;
-
-public:
-  Thread() = default;
-
-  static void start_thread() __attribute__((noinline));
-
-  // Return 0 on success or an error value on failure.
-  int run(ThreadRunner<ReturnType> *f, void *arg, void *stack, size_t size,
-          bool detached = false) {
-    bool owned_stack = false;
-    if (stack == nullptr) {
-      if (size == 0)
-        size = DEFAULT_STACK_SIZE;
-      auto alloc = alloc_stack(size);
-      if (!alloc)
-        return alloc.error_code();
-      else
-        stack = alloc.value();
-      owned_stack = true;
-    }
-
-    // When the new thread is spawned by the kernel, the new thread gets the
-    // stack we pass to the clone syscall. However, this stack is empty and does
-    // not have any local vars present in this function. Hence, one cannot
-    // pass arguments to the thread start function, or use any local vars from
-    // here. So, we pack them into the new stack from where the thread can sniff
-    // them out.
-    //
-    // Likewise, the actual thread state information is also stored on the
-    // stack memory.
-    uintptr_t adjusted_stack = reinterpret_cast<uintptr_t>(stack) + size -
-                               sizeof(StartArgs<ReturnType>) -
-                               sizeof(ThreadAttributes<ReturnType>) -
-                               sizeof(cpp::Atomic<FutexWordType>);
-    adjusted_stack &= ~(uintptr_t(STACK_ALIGNMENT) - 1);
-
-    auto *start_args =
-        reinterpret_cast<StartArgs<ReturnType> *>(adjusted_stack);
-    start_args->thread = this;
-    start_args->func = f;
-    start_args->arg = arg;
-
-    attrib = reinterpret_cast<ThreadAttributes<ReturnType> *>(
-        adjusted_stack + sizeof(StartArgs<ReturnType>));
-    attrib->detach_state =
-        uint32_t(detached ? DetachState::DETACHED : DetachState::JOINABLE);
-    attrib->stack = stack;
-    attrib->stack_size = size;
-    attrib->owned_stack = owned_stack;
-
-    clear_tid = reinterpret_cast<cpp::Atomic<FutexWordType> *>(
-        adjusted_stack + sizeof(StartArgs<ReturnType>) +
-        sizeof(ThreadAttributes<ReturnType>));
-    clear_tid->val = CLEAR_TID_VALUE;
-
-    // The clone syscall takes arguments in an architecture specific order.
-    // Also, we want the result of the syscall to be in a register as the child
-    // thread gets a completely different stack after it is created. The stack
-    // variables from this function will not be availalbe to the child thread.
-#ifdef LLVM_LIBC_ARCH_X86_64
-    long register clone_result asm("rax");
-    clone_result = __llvm_libc::syscall(
-        SYS_clone, CLONE_SYSCALL_FLAGS, adjusted_stack,
-        &attrib->tid,    // The address where the child tid is written
-        &clear_tid->val, // The futex where the child thread status is signalled
-        0                // Set TLS to null for now.
-    );
-#elif defined(LLVM_LIBC_ARCH_AARCH64)
-    long register clone_result asm("x0");
-    clone_result = __llvm_libc::syscall(
-        SYS_clone, CLONE_SYSCALL_FLAGS, adjusted_stack,
-        &attrib->tid,   // The address where the child tid is written
-        0,              // Set TLS to null for now.
-        &clear_tid->val // The futex where the child thread status is signalled
-    );
-#else
-#error "Unsupported architecture for the clone syscall."
-#endif
-
-    if (clone_result == 0) {
-#ifdef LLVM_LIBC_ARCH_AARCH64
-      // We set the frame pointer to be the same as the "sp" so that start args
-      // can be sniffed out from start_thread.
-      __arm_wsr64("x29", __arm_rsr64("sp"));
-#endif
-      start_thread();
-    } else if (clone_result < 0) {
-      if (attrib->owned_stack)
-        free_stack(attrib->stack, attrib->stack_size);
-      return -clone_result;
-    }
-
-    return 0;
-  }
-
-  int join(ReturnType *retval) {
-    wait();
-
-    *retval = attrib->retval;
-    if (attrib->owned_stack)
-      free_stack(attrib->stack, attrib->stack_size);
-
-    return 0;
-  }
-
-  // Detach a joinable thread.
-  //
-  // This method does not have error return value. However, the type of detach
-  // is returned to help with testing.
-  int detach() {
-    uint32_t joinable_state = uint32_t(DetachState::JOINABLE);
-    if (attrib->detach_state.compare_exchange_strong(
-            joinable_state, uint32_t(DetachState::DETACHED))) {
-      return int(DetachType::SIMPLE);
-    }
-
-    // If the thread was already detached, then the detach method should not
-    // be called at all. If the thread is exiting, then we wait for it to exit
-    // and free up resources.
-    wait();
-
-    if (attrib->owned_stack)
-      free_stack(attrib->stack, attrib->stack_size);
-    return int(DetachType::CLEANUP);
-  }
-
-  // Wait for the thread to finish. This method can only be called
-  // if:
-  // 1. A detached thread is guaranteed to be running.
-  // 2. A joinable thread has not been detached or joined. As long as it has
-  //    not been detached or joined, wait can be called multiple times.
-  //
-  // Also, only one thread can wait and expect to get woken up when the thread
-  // finishes.
-  //
-  // NOTE: This function is to be used for testing only. There is no standard
-  // which requires exposing it via a public API.
-  void wait() {
-    // The kernel should set the value at the clear tid address to zero.
-    // If not, it is a spurious wake and we should continue to wait on
-    // the futex.
-    while (clear_tid->load() != 0) {
-      // We cannot do a FUTEX_WAIT_PRIVATE here as the kernel does a
-      // FUTEX_WAKE and not a FUTEX_WAKE_PRIVATE.
-      __llvm_libc::syscall(SYS_futex, &clear_tid->val, FUTEX_WAIT,
-                           CLEAR_TID_VALUE, nullptr);
-    }
-  }
-};
-
-template <typename ReturnType>
-__attribute__((noinline)) void Thread<ReturnType>::start_thread() {
-  auto *start_args =
-      reinterpret_cast<StartArgs<ReturnType> *>(get_start_args_addr());
-  auto *thread = start_args->thread;
-  ReturnType retval = thread->attrib->retval =
-      start_args->func(start_args->arg);
-
-  uint32_t joinable_state = uint32_t(DetachState::JOINABLE);
-  if (!thread->attrib->detach_state.compare_exchange_strong(
-          joinable_state, uint32_t(DetachState::EXITING))) {
-    // Thread is detached so cleanup the resources.
-    if (thread->attrib->owned_stack)
-      free_stack(thread->attrib->stack, thread->attrib->stack_size);
-  }
-
-  __llvm_libc::syscall(SYS_exit, retval);
-}
-
-} // namespace __llvm_libc
-
-#endif // LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_THREAD_H
diff --git a/libc/src/__support/threads/thread.h b/libc/src/__support/threads/thread.h
index 2d38c37d060e02f23cb8e34716c337a24a519bf0..36a191cbc046d987978b6ab045f4ea616ce0a630 100644
--- a/libc/src/__support/threads/thread.h
+++ b/libc/src/__support/threads/thread.h
@@ -9,39 +9,153 @@
 #ifndef LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_H
 #define LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_H
 
-#include <stddef.h>
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/architectures.h"
 
-// The platform specific implemnetations are pulled via the following include.
-// The idea is for the platform implementation to implement a class named Thread
-// in the namespace __llvm_libc with the following properties:
-//
-// 1. Has a defaulted default constructor (not a default constructor).
-//
-// 2. Has a "run" method with the following signature:
-//
-//      int run(ThreadRunner *f, void *arg, void *stack, size_t size);
-//
-//    Returns:
-//      0 on success and an error value on failure.
-//    Args:
-//      arg - The argument to be passed to the thread runner after the thread
-//            is created.
-//      stack - The stack to use for the thread.
-//      size - The stack size.
-//
-//    If callers pass a non-null |stack| value, then it will assumed that
-//      1. The clean up the stack memory is their responsibility
-//      2. The guard area is setup appropriately by the caller.
-//
-// 3. Has a "join" method with the following signature:
-//      ErrorOr<int> join();
-//    The "join" method should return 0 on success and set retcode to the
-//    threads return value. On failure, an appropriate errno value should be
-//    returned.
+#include <stddef.h> // For size_t
+#include <stdint.h>
+
+namespace __llvm_libc {
+
+using ThreadRunnerPosix = void *(void *);
+using ThreadRunnerStdc = int(void *);
+
+union ThreadRunner {
+  ThreadRunnerPosix *posix_runner;
+  ThreadRunnerStdc *stdc_runner;
+};
+
+union ThreadReturnValue {
+  void *posix_retval;
+  int stdc_retval;
+};
+
+#if (defined(LLVM_LIBC_ARCH_AARCH64) || defined(LLVM_LIBC_ARCH_X86_64))
+constexpr unsigned int STACK_ALIGNMENT = 16;
+#endif
+// TODO: Provide stack alignment requirements for other architectures.
+
+enum class DetachState : uint32_t {
+  JOINABLE = 0x11,
+  EXITING = 0x22,
+  DETACHED = 0x33
+};
+
+enum class ThreadStyle : uint8_t { POSIX = 0x1, STDC = 0x2 };
+
+// Detach type is useful in testing the detach operation.
+enum class DetachType : int {
+  // Indicates that the detach operation just set the detach state to DETACHED
+  // and returned.
+  SIMPLE = 1,
+
+  // Indicates that the detach operation performed thread cleanup.
+  CLEANUP = 2
+};
+
+// A data type to hold common thread attributes which have to be stored as
+// thread state. Note that this is different from public attribute types like
+// pthread_attr_t which might contain information which need not be saved as
+// part of a thread's state. For example, the stack guard size.
 //
-// 4. Has an operator== for comparison between two threads.
-#ifdef __unix__
-#include "linux/thread.h"
-#endif // __unix__
+// Thread attributes are typically stored on the stack. So, we align as required
+// for the target architecture.
+struct alignas(STACK_ALIGNMENT) ThreadAttributes {
+  // We want the "detach_state" attribute to be an atomic value as it could be
+  // updated by one thread while the self thread is reading it. It is a tristate
+  // variable with the following state transitions:
+  // 1. If a thread is created in a detached state, then user code should never
+  //    call a detach or join function. Calling either of them can lead to
+  //    undefined behavior.
+  //    The value of |detach_state| is expected to be DetachState::DETACHED for
+  //    its lifetime.
+  // 2. If a thread is created in a joinable state, |detach_state| will start
+  //    with the value DetachState::JOINABLE. Another thread can detach this
+  //    thread before it exits. The state transitions will be as follows:
+  //    (a) If the detach method sees the state as JOINABLE, then it will
+  //        compare exchange to a state of DETACHED. The thread will clean
+  //        itself up after it finishes.
+  //    (b) If the detach method does not see JOINABLE in (a), then it will
+  //        conclude that the thread is EXITING and will wait until the thread
+  //        exits. It will clean up the thread resources once the thread
+  //        exits.
+  cpp::Atomic<uint32_t> detach_state;
+  void *stack; // Pointer to the thread stack
+  void *tls;
+  unsigned long long stack_size; // Size of the stack
+  unsigned char owned_stack; // Indicates if the thread owns this stack memory
+  int tid;
+  ThreadStyle style;
+  ThreadReturnValue retval;
+};
+
+struct Thread {
+  ThreadAttributes *attrib;
+  void *platform_data;
+
+  Thread() = default;
+
+  int run(ThreadRunnerPosix *func, void *arg, void *stack, size_t size,
+          bool detached = false) {
+    ThreadRunner runner;
+    runner.posix_runner = func;
+    return run(ThreadStyle::POSIX, runner, arg, stack, size, detached);
+  }
+
+  int run(ThreadRunnerStdc *func, void *arg, void *stack, size_t size,
+          bool detached = false) {
+    ThreadRunner runner;
+    runner.stdc_runner = func;
+    return run(ThreadStyle::STDC, runner, arg, stack, size, detached);
+  }
+
+  int join(int *val) {
+    ThreadReturnValue retval;
+    int status = join(retval);
+    if (status != 0)
+      return status;
+    *val = retval.stdc_retval;
+    return 0;
+  }
+
+  int join(void **val) {
+    ThreadReturnValue retval;
+    int status = join(retval);
+    if (status != 0)
+      return status;
+    *val = retval.posix_retval;
+    return 0;
+  }
+
+  // Platform should implement the functions below.
+
+  // Return 0 on success or an error value on failure.
+  int run(ThreadStyle style, ThreadRunner runner, void *arg, void *stack,
+          size_t stack_size, bool detached);
+
+  // Return 0 on success or an error value on failure.
+  int join(ThreadReturnValue &retval);
+
+  // Detach a joinable thread.
+  //
+  // This method does not have an error return value. However, the type of
+  // detach is returned to help with testing.
+  int detach();
+
+  // Wait for the thread to finish. This method can only be called
+  // if:
+  // 1. A detached thread is guaranteed to be running.
+  // 2. A joinable thread has not been detached or joined. As long as it has
+  //    not been detached or joined, wait can be called multiple times.
+  //
+  // Also, only one thread can wait and expect to get woken up when the thread
+  // finishes.
+  //
+  // NOTE: This function is to be used for testing only. There is no standard
+  // which requires exposing it via a public API.
+  void wait();
+};
+
+} // namespace __llvm_libc
 
 #endif // LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_H
diff --git a/libc/src/__support/threads/thread_attrib.h b/libc/src/__support/threads/thread_attrib.h
deleted file mode 100644
index 31d97acd3fb90927a459cfbb926c4ad4f2bf2a7d..0000000000000000000000000000000000000000
--- a/libc/src/__support/threads/thread_attrib.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===--- A data type for thread attributes ----------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_ATTRIB_H
-#define LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_ATTRIB_H
-
-#include "src/__support/CPP/atomic.h"
-#include "src/__support/architectures.h"
-
-#include <stdint.h>
-
-namespace __llvm_libc {
-
-#if (defined(LLVM_LIBC_ARCH_AARCH64) || defined(LLVM_LIBC_ARCH_X86_64))
-constexpr unsigned int STACK_ALIGNMENT = 16;
-#endif
-// TODO: Provide stack alignment requirements for other architectures.
-
-enum class DetachState : uint32_t {
-  JOINABLE = 0x11,
-  EXITING = 0x22,
-  DETACHED = 0x33
-};
-
-// Detach type is useful in testing the detach operation.
-enum class DetachType : int {
-  // Indicates that the detach operation just set the detach state to DETACHED
-  // and returned.
-  SIMPLE = 1,
-
-  // Indicates that the detach operation performed thread cleanup.
-  CLEANUP = 2
-};
-
-// A data type to hold common thread attributes which have to be stored as
-// thread state. Note that this is different from public attribute types like
-// pthread_attr_t which might contain information which need not be saved as
-// part of a thread's state. For example, the stack guard size.
-//
-// Thread attributes are typically stored on the stack. So, we align as required
-// for the target architecture.
-template <typename ReturnType>
-struct alignas(STACK_ALIGNMENT) ThreadAttributes {
-  // We want the "detach_state" attribute to be an atomic value as it could be
-  // updated by one thread while the self thread is reading it. It is a tristate
-  // variable with the following state transitions:
-  // 1. The a thread is created in a detached state, then user code should never
-  //    call a detach or join function. Calling either of them can lead to
-  //    undefined behavior.
-  //    The value of |detach_state| is expected to be DetachState::DETACHED for
-  //    its lifetime.
-  // 2. If a thread is created in a joinable state, |detach_state| will start
-  //    with the value DetachState::JOINABLE. Another thread can detach this
-  //    thread before it exits. The state transitions will as follows:
-  //    (a) If the detach method sees the state as JOINABLE, then it will
-  //        compare exchange to a state of DETACHED. The thread will clean
-  //        itself up after it finishes.
-  //    (b) If the detach method does not see JOINABLE in (a), then it will
-  //        conclude that the thread is EXITING and will wait until the thread
-  //        exits. It will clean up the thread resources once the thread
-  //        exits.
-  cpp::Atomic<uint32_t> detach_state;
-  void *stack;                   // Pointer to the thread stack
-  unsigned long long stack_size; // Size of the stack
-  unsigned char owned_stack; // Indicates if the thread owns this stack memory
-  ReturnType retval;         // The return value of thread runner is saved here
-  int tid;
-};
-
-} // namespace __llvm_libc
-
-#endif // LLVM_LIBC_SRC_SUPPORT_THREADS_THREAD_ATTRIB_H
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 60ccfb2159a1f51370eb27ba4278ee2a089f8470..c3376310e9d462220e68aaf94b962087604582c8 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -103,6 +103,9 @@ add_math_entrypoint_object(fmin)
 add_math_entrypoint_object(fminf)
 add_math_entrypoint_object(fminl)
 
+add_math_entrypoint_object(fmod)
+add_math_entrypoint_object(fmodf)
+
 add_math_entrypoint_object(frexp)
 add_math_entrypoint_object(frexpf)
 add_math_entrypoint_object(frexpl)
diff --git a/libc/src/math/fmod.h b/libc/src/math/fmod.h
new file mode 100644
index 0000000000000000000000000000000000000000..a79ff018ec641c2834ab6b5b797d2a38f14ca5c4
--- /dev/null
+++ b/libc/src/math/fmod.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fmod --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMOD_H
+#define LLVM_LIBC_SRC_MATH_FMOD_H
+
+namespace __llvm_libc {
+
+double fmod(double x, double y);
+
+} // namespace __llvm_libc
+
+#endif // LLVM_LIBC_SRC_MATH_FMOD_H
diff --git a/libc/src/math/fmodf.h b/libc/src/math/fmodf.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab9c4aee611782b1ef3c2f700eceb154bd170f29
--- /dev/null
+++ b/libc/src/math/fmodf.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fmodf -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMODF_H
+#define LLVM_LIBC_SRC_MATH_FMODF_H
+
+namespace __llvm_libc {
+
+float fmodf(float x, float y);
+
+} // namespace __llvm_libc
+
+#endif // LLVM_LIBC_SRC_MATH_FMODF_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 72b8e6b7a1aededeb4e0010875b2682d51f69516..1a693e6d5c2c9edfad203fbfb20d534acbdaaf88 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1090,3 +1090,29 @@ add_object_library(
   COMPILE_OPTIONS
     -O3
 )
+
+add_entrypoint_object(
+  fmod
+  SRCS
+    fmod.cpp
+  HDRS
+    ../fmod.h
+  DEPENDS
+    libc.include.math
+    libc.src.__support.FPUtil.generic.fmod
+  COMPILE_OPTIONS
+    -O3
+)
+
+add_entrypoint_object(
+  fmodf
+  SRCS
+    fmodf.cpp
+  HDRS
+    ../fmodf.h
+  DEPENDS
+    libc.include.math
+    libc.src.__support.FPUtil.generic.fmod
+  COMPILE_OPTIONS
+    -O3
+)
\ No newline at end of file
diff --git a/libc/src/math/generic/fmod.cpp b/libc/src/math/generic/fmod.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..563a1644aa75d0751f34fb3d6d1cb246cd66057f
--- /dev/null
+++ b/libc/src/math/generic/fmod.cpp
@@ -0,0 +1,19 @@
+//===-- Double-precision fmod function ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmod.h"
+#include "src/__support/FPUtil/generic/FMod.h"
+#include "src/__support/common.h"
+
+namespace __llvm_libc {
+
+LLVM_LIBC_FUNCTION(double, fmod, (double x, double y)) {
+  return fputil::generic::FMod<double>::eval(x, y);
+}
+
+} // namespace __llvm_libc
diff --git a/libc/src/math/generic/fmodf.cpp b/libc/src/math/generic/fmodf.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dca476c183b0c4e116f4f8d44a9706c44b4a238b
--- /dev/null
+++ b/libc/src/math/generic/fmodf.cpp
@@ -0,0 +1,19 @@
+//===-- Single-precision fmodf function -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmodf.h"
+#include "src/__support/FPUtil/generic/FMod.h"
+#include "src/__support/common.h"
+
+namespace __llvm_libc {
+
+LLVM_LIBC_FUNCTION(float, fmodf, (float x, float y)) {
+  return fputil::generic::FMod<float>::eval(x, y);
+}
+
+} // namespace __llvm_libc
diff --git a/libc/src/pthread/pthread_create.cpp b/libc/src/pthread/pthread_create.cpp
index bc945a93984e98880d349a440ff8aaaa6b5eca2c..ec1e0e331ae0447ab10512e1e1f660e36093a3dd 100644
--- a/libc/src/pthread/pthread_create.cpp
+++ b/libc/src/pthread/pthread_create.cpp
@@ -16,14 +16,14 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread<void *>),
-              "Mismatch between pthread_t and internal Thread.");
+static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between pthread_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, pthread_create,
                    (pthread_t *__restrict th,
                     const pthread_attr_t *__restrict attr,
                     __pthread_start_t func, void *arg)) {
-  auto *thread = reinterpret_cast<__llvm_libc::Thread<void *> *>(th);
+  auto *thread = reinterpret_cast<__llvm_libc::Thread *>(th);
   int result = thread->run(func, arg, nullptr, 0);
   if (result != 0 && result != EPERM)
     return EAGAIN;
diff --git a/libc/src/pthread/pthread_detach.cpp b/libc/src/pthread/pthread_detach.cpp
index c71b12fbdcda59ae25abd2b50badf24f7a65f0ab..009438e16c4c3d3386eb53b8f68c0a8a51a464cd 100644
--- a/libc/src/pthread/pthread_detach.cpp
+++ b/libc/src/pthread/pthread_detach.cpp
@@ -15,11 +15,11 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread<void *>),
-              "Mismatch between pthread_t and internal Thread.");
+static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between pthread_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, pthread_detach, (pthread_t th)) {
-  auto *thread = reinterpret_cast<Thread<void *> *>(&th);
+  auto *thread = reinterpret_cast<Thread *>(&th);
   thread->detach();
   return 0;
 }
diff --git a/libc/src/pthread/pthread_join.cpp b/libc/src/pthread/pthread_join.cpp
index c3bf4adc6372ef670b5ad6c347e03553a431d99a..0774d02680526878b8cc0f2e8e40dc315ec7eede 100644
--- a/libc/src/pthread/pthread_join.cpp
+++ b/libc/src/pthread/pthread_join.cpp
@@ -15,11 +15,11 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread<void *>),
-              "Mismatch between pthread_t and internal Thread.");
+static_assert(sizeof(pthread_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between pthread_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, pthread_join, (pthread_t th, void **retval)) {
-  auto *thread = reinterpret_cast<Thread<void *> *>(&th);
+  auto *thread = reinterpret_cast<Thread *>(&th);
   int result = thread->join(retval);
   return result;
 }
diff --git a/libc/src/string/memory_utils/utils.h b/libc/src/string/memory_utils/utils.h
index f23a3240fde726e968351aa272f4ae2f3b68b9af..7197a7e605e2a5ac106a28ff6157b9169d4a94ca 100644
--- a/libc/src/string/memory_utils/utils.h
+++ b/libc/src/string/memory_utils/utils.h
@@ -17,6 +17,8 @@
 // time.
 #if defined(LLVM_LIBC_ARCH_AARCH64) || defined(LLVM_LIBC_ARCH_X86)
 #define LLVM_LIBC_CACHELINE_SIZE 64
+#elif defined(LLVM_LIBC_ARCH_ARM)
+#define LLVM_LIBC_CACHELINE_SIZE 32
 #else
 #error "Unsupported platform for memory functions."
 #endif
diff --git a/libc/src/threads/thrd_create.cpp b/libc/src/threads/thrd_create.cpp
index 91b36a71f8239002bf098ea834297e837251c8aa..9e81f0fdf78c1ef4eeea741d52c75e70577ef232 100644
--- a/libc/src/threads/thrd_create.cpp
+++ b/libc/src/threads/thrd_create.cpp
@@ -15,12 +15,12 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread<int>),
-              "Mismatch between thrd_t and internal Thread.");
+static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between thrd_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, thrd_create,
                    (thrd_t * th, thrd_start_t func, void *arg)) {
-  auto *thread = reinterpret_cast<__llvm_libc::Thread<int> *>(th);
+  auto *thread = reinterpret_cast<__llvm_libc::Thread *>(th);
   int result = thread->run(func, arg, nullptr, 0);
   if (result == 0)
     return thrd_success;
diff --git a/libc/src/threads/thrd_detach.cpp b/libc/src/threads/thrd_detach.cpp
index e3c4301709cdf3d4930bb5e07a10f304c2ae2498..20c6540faa17c91377d7902b7dec399719a328bc 100644
--- a/libc/src/threads/thrd_detach.cpp
+++ b/libc/src/threads/thrd_detach.cpp
@@ -14,11 +14,11 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread<int>),
-              "Mismatch between thrd_t and internal Thread.");
+static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between thrd_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, thrd_detach, (thrd_t th)) {
-  auto *thread = reinterpret_cast<Thread<int> *>(&th);
+  auto *thread = reinterpret_cast<Thread *>(&th);
   thread->detach();
   return 0;
 }
diff --git a/libc/src/threads/thrd_join.cpp b/libc/src/threads/thrd_join.cpp
index fbdfa78fec2b94cc129bb7e9f9a7a3572cfa6645..4c3ecac34a4e95f0e8e7ae494ad69f93af647a33 100644
--- a/libc/src/threads/thrd_join.cpp
+++ b/libc/src/threads/thrd_join.cpp
@@ -14,11 +14,11 @@
 
 namespace __llvm_libc {
 
-static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread<int>),
-              "Mismatch between thrd_t and internal Thread.");
+static_assert(sizeof(thrd_t) == sizeof(__llvm_libc::Thread),
+              "Mismatch between thrd_t and internal Thread.");
 
 LLVM_LIBC_FUNCTION(int, thrd_join, (thrd_t * th, int *retval)) {
-  auto *thread = reinterpret_cast<Thread<int> *>(th);
+  auto *thread = reinterpret_cast<Thread *>(th);
   int result = thread->join(retval);
   return result == 0 ? thrd_success : thrd_error;
 }
diff --git a/libc/test/integration/CMakeLists.txt b/libc/test/integration/CMakeLists.txt
index 393084d122aad74c985ecad0ef4448787a1ac3ce..675f4c47c878a6c86d3be57cff4a3418440b7af1 100644
--- a/libc/test/integration/CMakeLists.txt
+++ b/libc/test/integration/CMakeLists.txt
@@ -1,5 +1,10 @@
 add_custom_target(libc-integration-tests)
 
+function(add_libc_integration_test_suite name)
+  add_custom_target(${name})
+  add_dependencies(libc-integration-tests ${name})
+endfunction()
+
 add_library(
   libc_integration_test_dummy
   STATIC
diff --git a/libc/test/integration/src/CMakeLists.txt b/libc/test/integration/src/CMakeLists.txt
index 641755c0f39e8b5e76a02453bb9ae1c791788e76..f175dc4f23bcd0e6cfb23a755b09907aab6eab72 100644
--- a/libc/test/integration/src/CMakeLists.txt
+++ b/libc/test/integration/src/CMakeLists.txt
@@ -1 +1,3 @@
+add_subdirectory(pthread)
 add_subdirectory(stdlib)
+add_subdirectory(threads)
diff --git a/libc/test/integration/src/pthread/CMakeLists.txt b/libc/test/integration/src/pthread/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0f52d8ac28e6378ec2ee078b2b841c9c29400b77
--- /dev/null
+++ b/libc/test/integration/src/pthread/CMakeLists.txt
@@ -0,0 +1,34 @@
+add_libc_integration_test_suite(libc-pthread-integration-tests)
+
+add_integration_test(
+  pthread_mutex_test
+  SUITE
+    libc-pthread-integration-tests
+  SRCS
+    pthread_mutex_test.cpp
+  LOADER
+    libc.loader.linux.crt1
+  DEPENDS
+    libc.include.pthread
+    libc.src.errno.errno
+    libc.src.pthread.pthread_mutex_destroy
+    libc.src.pthread.pthread_mutex_init
+    libc.src.pthread.pthread_mutex_lock
+    libc.src.pthread.pthread_mutex_unlock
+    libc.src.pthread.pthread_create
+    libc.src.pthread.pthread_join
+)
+
+add_integration_test(
+  pthread_test
+  SUITE
+    libc-pthread-integration-tests
+  SRCS
+    pthread_test.cpp
+  LOADER
+    libc.loader.linux.crt1
+  DEPENDS
+    libc.include.pthread
+    libc.src.pthread.pthread_create
+    libc.src.pthread.pthread_join
+)
diff --git a/libc/test/src/pthread/pthread_mutex_test.cpp b/libc/test/integration/src/pthread/pthread_mutex_test.cpp
similarity index 95%
rename from libc/test/src/pthread/pthread_mutex_test.cpp
rename to libc/test/integration/src/pthread/pthread_mutex_test.cpp
index 28c06b01eb1a1146a7adf638d427f01b3eb1c120..823efe393eab452ea8af1b29043115018ac3d575 100644
--- a/libc/test/src/pthread/pthread_mutex_test.cpp
+++ b/libc/test/integration/src/pthread/pthread_mutex_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for pthread_mutex_t -------------------------------------===//
+//===-- Tests for pthread_mutex_t -----------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -14,7 +14,7 @@
 #include "src/pthread/pthread_create.h"
 #include "src/pthread/pthread_join.h"
 
-#include "utils/UnitTest/Test.h"
+#include "utils/IntegrationTest/test.h"
 
 #include <pthread.h>
 
@@ -39,7 +39,7 @@ void *counter(void *arg) {
   return nullptr;
 }
 
-TEST(LlvmLibcMutexTest, RelayCounter) {
+void relay_counter() {
   ASSERT_EQ(__llvm_libc::pthread_mutex_init(&mutex, nullptr), 0);
 
   // The idea of this test is that two competing threads will update
@@ -84,7 +84,7 @@ void *stepper(void *arg) {
   return nullptr;
 }
 
-TEST(LlvmLibcMutexTest, WaitAndStep) {
+void wait_and_step() {
   ASSERT_EQ(__llvm_libc::pthread_mutex_init(&start_lock, nullptr), 0);
   ASSERT_EQ(__llvm_libc::pthread_mutex_init(&step_lock, nullptr), 0);
 
@@ -151,7 +151,7 @@ void *waiter_func(void *) {
   return nullptr;
 }
 
-TEST(LlvmLibcMutexTest, MultipleWaiters) {
+void multiple_waiters() {
   __llvm_libc::pthread_mutex_init(&multiple_waiter_lock, nullptr);
   __llvm_libc::pthread_mutex_init(&counter_lock, nullptr);
 
@@ -184,3 +184,10 @@ TEST(LlvmLibcMutexTest, MultipleWaiters) {
   __llvm_libc::pthread_mutex_destroy(&multiple_waiter_lock);
   __llvm_libc::pthread_mutex_destroy(&counter_lock);
 }
+
+int main() {
+  relay_counter();
+  wait_and_step();
+  multiple_waiters();
+  return 0;
+}
diff --git a/libc/test/src/pthread/pthread_test.cpp b/libc/test/integration/src/pthread/pthread_test.cpp
similarity index 88%
rename from libc/test/src/pthread/pthread_test.cpp
rename to libc/test/integration/src/pthread/pthread_test.cpp
index 3a3d587ab5d51809e962d8a9bb45d78838663580..f5be1c67f35365048361f5c4641d3d81392a8df3 100644
--- a/libc/test/src/pthread/pthread_test.cpp
+++ b/libc/test/integration/src/pthread/pthread_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for pthread_t -------------------------------------------===//
+//===-- Tests for pthread_t -----------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -8,7 +8,7 @@
 #include "src/pthread/pthread_create.h"
 #include "src/pthread/pthread_join.h"
 
-#include "utils/UnitTest/Test.h"
+#include "utils/IntegrationTest/test.h"
 
 #include <pthread.h>
 
@@ -19,7 +19,7 @@ static void *thread_func(void *) {
   return nullptr;
 }
 
-TEST(LlvmLibcThreadTest, CreateAndJoin) {
+void create_and_join() {
   for (counter = 0; counter <= thread_count;) {
     pthread_t thread;
     int old_counter_val = counter;
@@ -36,7 +36,7 @@ TEST(LlvmLibcThreadTest, CreateAndJoin) {
 
 static void *return_arg(void *arg) { return arg; }
 
-TEST(LlvmLibcThreadTest, SpawnAndJoin) {
+void spawn_and_join() {
   pthread_t thread_list[thread_count];
   int args[thread_count];
 
@@ -54,3 +54,9 @@ TEST(LlvmLibcThreadTest, SpawnAndJoin) {
     ASSERT_EQ(*reinterpret_cast<int *>(retval), i);
   }
 }
+
+int main() {
+  create_and_join();
+  spawn_and_join();
+  return 0;
+}
diff --git a/libc/test/src/threads/CMakeLists.txt b/libc/test/integration/src/threads/CMakeLists.txt
similarity index 74%
rename from libc/test/src/threads/CMakeLists.txt
rename to libc/test/integration/src/threads/CMakeLists.txt
index 6ac093cb7205cabf3007eaf89dbf78749ae9320e..05acc7f1da45635b64889bcb24824649b5ba82ad 100644
--- a/libc/test/src/threads/CMakeLists.txt
+++ b/libc/test/integration/src/threads/CMakeLists.txt
@@ -1,29 +1,32 @@
-add_libc_testsuite(libc_threads_unittests)
+add_libc_integration_test_suite(libc-threads-integration-tests)
 
-add_libc_unittest(
-  call_once_test
+add_integration_test(
+  mtx_test
   SUITE
-    libc_threads_unittests
+    libc-threads-integration-tests
   SRCS
-    call_once_test.cpp
+    mtx_test.cpp
+  LOADER
+    libc.loader.linux.crt1
  DEPENDS
    libc.include.threads
-    libc.src.threads.call_once
+    libc.src.errno.errno
    libc.src.threads.mtx_destroy
    libc.src.threads.mtx_init
    libc.src.threads.mtx_lock
    libc.src.threads.mtx_unlock
    libc.src.threads.thrd_create
    libc.src.threads.thrd_join
-    libc.src.__support.CPP.atomic
 )
 
-add_libc_unittest(
+add_integration_test(
   thrd_test
   SUITE
-    libc_threads_unittests
+    libc-threads-integration-tests
   SRCS
     thrd_test.cpp
+  LOADER
+    libc.loader.linux.crt1
   DEPENDS
     libc.include.threads
     libc.src.errno.errno
@@ -31,29 +34,34 @@
     libc.src.threads.thrd_join
 )
 
-add_libc_unittest(
-  mtx_test
+add_integration_test(
+  call_once_test
   SUITE
-    libc_threads_unittests
+    libc-threads-integration-tests
   SRCS
-    mtx_test.cpp
+    call_once_test.cpp
+  LOADER
+    libc.loader.linux.crt1
   DEPENDS
     libc.include.threads
-    libc.src.errno.errno
+    libc.src.threads.call_once
     libc.src.threads.mtx_destroy
     libc.src.threads.mtx_init
     libc.src.threads.mtx_lock
     libc.src.threads.mtx_unlock
     libc.src.threads.thrd_create
     libc.src.threads.thrd_join
+    libc.src.__support.CPP.atomic
 )
 
-add_libc_unittest(
+add_integration_test(
   cnd_test
   SUITE
-    libc_threads_unittests
+    libc-threads-integration-tests
   SRCS
     cnd_test.cpp
+  LOADER
+    libc.loader.linux.crt1
   DEPENDS
     libc.include.threads
     libc.src.threads.cnd_init
diff --git a/libc/test/src/threads/call_once_test.cpp b/libc/test/integration/src/threads/call_once_test.cpp
similarity index 92%
rename from libc/test/src/threads/call_once_test.cpp
rename to libc/test/integration/src/threads/call_once_test.cpp
index 6090e63435af40b42f9a08297f1dbef35e91889e..cf853372f3a93389f72d626703fc94db0441f82e 100644
--- a/libc/test/src/threads/call_once_test.cpp
+++ b/libc/test/integration/src/threads/call_once_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for call_once -------------------------------------------===//
+//===-- Tests for call_once -----------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "include/threads.h"
 #include "src/__support/CPP/atomic.h"
 #include "src/threads/call_once.h"
 #include "src/threads/mtx_destroy.h"
@@ -15,7 +14,10 @@
 #include "src/threads/mtx_unlock.h"
 #include "src/threads/thrd_create.h"
 #include "src/threads/thrd_join.h"
-#include "utils/UnitTest/Test.h"
+
+#include "utils/IntegrationTest/test.h"
+
+#include <threads.h>
 
 static constexpr unsigned int NUM_THREADS = 5;
 static __llvm_libc::cpp::Atomic<unsigned int> thread_count;
@@ -32,7 +34,7 @@ static int func(void *) {
   return 0;
 }
 
-TEST(LlvmLibcCallOnceTest, CallFrom5Threads) {
+void call_from_5_threads() {
   // Ensure the call count and thread count are 0 to begin with.
   call_count = 0;
   thread_count = 0;
@@ -73,7 +75,7 @@ static int once_func_caller(void *) {
 // Test the synchronization aspect of the call_once function.
 // This is not a fool proof test, but something which might be
 // useful when we add a flakiness detection scheme to UnitTest.
-TEST(LlvmLibcCallOnceTest, TestSynchronization) {
+void test_synchronization() {
   start_count = 0;
   done_count = 0;
 
@@ -111,3 +113,9 @@ TEST(LlvmLibcCallOnceTest, TestSynchronization) {
 
   __llvm_libc::mtx_destroy(&once_func_blocker);
 }
+
+int main() {
+  call_from_5_threads();
+  test_synchronization();
+  return 0;
+}
diff --git a/libc/test/src/threads/cnd_test.cpp b/libc/test/integration/src/threads/cnd_test.cpp
similarity index 93%
rename from libc/test/src/threads/cnd_test.cpp
rename to libc/test/integration/src/threads/cnd_test.cpp
index 5d69142312bdcdf04655e83071559afdee720c12..605bc72dd40c34b67f3fac199be20f9c639ee371 100644
--- a/libc/test/src/threads/cnd_test.cpp
+++ b/libc/test/integration/src/threads/cnd_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for condition variable broadcast fucntionality ----------===//
+//===-- Tests for standard condition variables ----------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "include/threads.h"
 #include "src/__support/CPP/atomic.h"
 #include "src/threads/cnd_broadcast.h"
 #include "src/threads/cnd_destroy.h"
@@ -19,7 +18,10 @@
 #include "src/threads/mtx_unlock.h"
 #include "src/threads/thrd_create.h"
 #include "src/threads/thrd_join.h"
-#include "utils/UnitTest/Test.h"
+
+#include "utils/IntegrationTest/test.h"
+
+#include <threads.h>
 
 namespace wait_notify_broadcast_test {
 
@@ -54,7 +56,7 @@ int broadcast_thread_func(void *) {
   return 0;
 }
 
-TEST(LlvmLibcCndVarTest, WaitNotifyBroadcastTest) {
+void wait_notify_broadcast_test() {
   __llvm_libc::cnd_init(&broadcast_cnd);
   __llvm_libc::cnd_init(&threads_ready_cnd);
   __llvm_libc::mtx_init(&broadcast_mtx, mtx_plain);
@@ -111,7 +113,7 @@ int waiter_thread_func(void *unused) {
   return 0x600D;
 }
 
-TEST(LlvmLibcCndVarTest, SingleWaiterTest) {
+void single_waiter_test() {
   ASSERT_EQ(__llvm_libc::mtx_init(&waiter_mtx, mtx_plain), int(thrd_success));
   ASSERT_EQ(__llvm_libc::mtx_init(&main_thread_mtx, mtx_plain),
             int(thrd_success));
@@ -142,3 +144,9 @@ TEST(LlvmLibcCndVarTest, SingleWaiterTest) {
   }
 
 } // namespace single_waiter_test
+
+int main() {
+  wait_notify_broadcast_test::wait_notify_broadcast_test();
+  single_waiter_test::single_waiter_test();
+  return 0;
+}
diff --git a/libc/test/src/threads/mtx_test.cpp b/libc/test/integration/src/threads/mtx_test.cpp
similarity index 94%
rename from libc/test/src/threads/mtx_test.cpp
rename to libc/test/integration/src/threads/mtx_test.cpp
index 3abe77e0eab725286254f127537b9244e85963cb..2d927cdf3d59d46444268a4902ac529c230d9dff 100644
--- a/libc/test/src/threads/mtx_test.cpp
+++ b/libc/test/integration/src/threads/mtx_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for mtx_t -----------------------------------------------===//
+//===-- Tests for mtx_t operations ----------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,14 +6,16 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "include/threads.h"
 #include "src/threads/mtx_destroy.h"
 #include "src/threads/mtx_init.h"
 #include "src/threads/mtx_lock.h"
 #include "src/threads/mtx_unlock.h"
 #include "src/threads/thrd_create.h"
 #include "src/threads/thrd_join.h"
-#include "utils/UnitTest/Test.h"
+
+#include "utils/IntegrationTest/test.h"
+
+#include <threads.h>
 
 constexpr int START = 0;
 constexpr int MAX = 10000;
@@ -36,7 +38,7 @@ int counter(void *arg) {
   return 0;
 }
 
-TEST(LlvmLibcMutexTest, RelayCounter) {
+void relay_counter() {
   ASSERT_EQ(__llvm_libc::mtx_init(&mutex, mtx_plain),
             static_cast<int>(thrd_success));
 
@@ -82,7 +84,7 @@ int stepper(void *arg) {
   return 0;
 }
 
-TEST(LlvmLibcMutexTest, WaitAndStep) {
+void wait_and_step() {
   ASSERT_EQ(__llvm_libc::mtx_init(&start_lock, mtx_plain),
             static_cast<int>(thrd_success));
   ASSERT_EQ(__llvm_libc::mtx_init(&step_lock, mtx_plain),
@@ -156,7 +158,7 @@ int waiter_func(void *) {
   return 0;
 }
 
-TEST(LlvmLibcMutexTest, MultipleWaiters) {
+void multiple_waiters() {
   __llvm_libc::mtx_init(&multiple_waiter_lock, mtx_plain);
   __llvm_libc::mtx_init(&counter_lock, mtx_plain);
 
@@ -189,3 +191,10 @@ TEST(LlvmLibcMutexTest, MultipleWaiters) {
   __llvm_libc::mtx_destroy(&multiple_waiter_lock);
   __llvm_libc::mtx_destroy(&counter_lock);
 }
+
+int main() {
+  relay_counter();
+  wait_and_step();
+  multiple_waiters();
+  return 0;
+}
diff --git a/libc/test/src/threads/thrd_test.cpp b/libc/test/integration/src/threads/thrd_test.cpp
similarity index 86%
rename from libc/test/src/threads/thrd_test.cpp
rename to libc/test/integration/src/threads/thrd_test.cpp
index ef4b25807674745a9689c1a9967b15d260dd15db..69df030278894521852284a8740e48bc1e917af8 100644
--- a/libc/test/src/threads/thrd_test.cpp
+++ b/libc/test/integration/src/threads/thrd_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for thrd_t ----------------------------------------------===//
+//===-- Tests for thrd_t creation and joining -----------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,10 +6,12 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "include/threads.h"
 #include "src/threads/thrd_create.h"
 #include "src/threads/thrd_join.h"
-#include "utils/UnitTest/Test.h"
+
+#include "utils/IntegrationTest/test.h"
+
+#include <threads.h>
 
 static constexpr int thread_count = 1000;
 static int counter = 0;
@@ -18,7 +20,7 @@ static int thread_func(void *) {
   return 0;
 }
 
-TEST(LlvmLibcThreadTest, CreateAndJoin) {
+void create_and_join() {
   for (counter = 0; counter <= thread_count;) {
     thrd_t thread;
     int old_counter_val = counter;
@@ -33,7 +35,7 @@ TEST(LlvmLibcThreadTest, CreateAndJoin) {
 
 static int return_arg(void *arg) { return *reinterpret_cast<int *>(arg); }
 
-TEST(LlvmLibcThreadTest, SpawnAndJoin) {
+void spawn_and_join() {
   thrd_t thread_list[thread_count];
   int args[thread_count];
 
@@ -50,3 +52,8 @@ TEST(LlvmLibcThreadTest, SpawnAndJoin) {
     ASSERT_EQ(retval, i);
   }
 }
+
+int main() {
+  create_and_join();
+  spawn_and_join();
+}
diff --git a/libc/test/src/CMakeLists.txt b/libc/test/src/CMakeLists.txt
index 2895eba829dbe76c7e3b3b3617ad986b0e8ab903..3d96014182fffe2d4bd74a93348712fb5d82bca3 100644
--- a/libc/test/src/CMakeLists.txt
+++ b/libc/test/src/CMakeLists.txt
@@ -50,7 +50,6 @@ endif()
 # add_subdirectory(assert)
 # add_subdirectory(signal)
 add_subdirectory(stdio)
-add_subdirectory(threads)
 add_subdirectory(time)
 
 if(${LIBC_TARGET_OS} STREQUAL "linux")
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index 6f0750cd5d40411421702e69aa897b9e8dbea545..97ae33dd13b83ff251194090eab385e6d79e9bb3 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -18,6 +18,7 @@ add_libc_unittest(
     high_precision_decimal_test.cpp
   DEPENDS
     libc.src.__support.high_precision_decimal
+    libc.src.__support.CPP.uint128
 )
 
 add_libc_unittest(
@@ -28,6 +29,7 @@ add_libc_unittest(
     str_to_float_test.cpp
   DEPENDS
     libc.src.__support.str_to_float
+    libc.src.__support.CPP.uint128
 )
 
 add_libc_unittest(
diff --git a/libc/test/src/__support/CPP/CMakeLists.txt b/libc/test/src/__support/CPP/CMakeLists.txt
index 9e8e5ae0d964cbb89f26a96f58f1408225cd59c4..9844425e3cc18550e9a4e2533e9044533cca4b78 100644
--- a/libc/test/src/__support/CPP/CMakeLists.txt
+++ b/libc/test/src/__support/CPP/CMakeLists.txt
@@ -28,6 +28,7 @@ add_libc_unittest(
     limits_test.cpp
   DEPENDS
     libc.src.__support.CPP.limits
+    libc.src.__support.CPP.uint
 )
 
 add_libc_unittest(
diff --git a/libc/test/src/__support/CPP/limits_test.cpp b/libc/test/src/__support/CPP/limits_test.cpp
index 0272b5a6521ae586a23ed544776016ae96004a4a..b55823f5527db4fab875c289b40d3e9c1fafd99f 100644
--- a/libc/test/src/__support/CPP/limits_test.cpp
+++ b/libc/test/src/__support/CPP/limits_test.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "src/__support/CPP/Limits.h"
+#include "src/__support/CPP/UInt.h"
 #include "utils/UnitTest/Test.h"
 
 // This just checks against the C spec, almost all implementations will surpass
@@ -29,23 +30,15 @@ TEST(LlvmLibcLimitsTest, LimitsFollowSpec) {
             ULLONG_MAX);
 }
 
+TEST(LlvmLibcLimitsTest, UInt128Limits) {
+  auto umax128 =
+      __llvm_libc::cpp::NumericLimits<__llvm_libc::cpp::UInt<128>>::max();
+  auto umax64 = __llvm_libc::cpp::UInt<128>(
+      __llvm_libc::cpp::NumericLimits<uint64_t>::max());
+  EXPECT_GT(umax128, umax64);
+  ASSERT_EQ(~__llvm_libc::cpp::UInt<128>(0), umax128);
 #ifdef __SIZEOF_INT128__
-// This checks that the current environment supports 128 bit integers.
-TEST(LlvmLibcLimitsTest, Int128Works) { - __int128_t max128 = ~__uint128_t(0) >> 1; - __int128_t min128 = (__int128_t(1) << 127); - EXPECT_GT(__llvm_libc::cpp::NumericLimits<__int128_t>::max(), - __int128_t(__llvm_libc::cpp::NumericLimits<long long>::max())); - ASSERT_EQ(__llvm_libc::cpp::NumericLimits<__int128_t>::max(), max128); - - EXPECT_LT(__llvm_libc::cpp::NumericLimits<__int128_t>::min(), - __int128_t(__llvm_libc::cpp::NumericLimits<long long>::min())); - ASSERT_EQ(__llvm_libc::cpp::NumericLimits<__int128_t>::min(), min128); - - __uint128_t umax128 = ~__uint128_t(0); - EXPECT_GT( - __llvm_libc::cpp::NumericLimits<__uint128_t>::max(), - __uint128_t(__llvm_libc::cpp::NumericLimits<unsigned long long>::max())); - ASSERT_EQ(__llvm_libc::cpp::NumericLimits<__uint128_t>::max(), umax128); -} + ASSERT_EQ(~__uint128_t(0), + __llvm_libc::cpp::NumericLimits<__uint128_t>::max()); #endif +} diff --git a/libc/test/src/__support/high_precision_decimal_test.cpp b/libc/test/src/__support/high_precision_decimal_test.cpp index 46cc572c7624d34de5f8564e9627c834317a8bf5..a0d1043896aa27dfd200d9d5067811b07d01641c 100644 --- a/libc/test/src/__support/high_precision_decimal_test.cpp +++ b/libc/test/src/__support/high_precision_decimal_test.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/CPP/UInt128.h" #include "src/__support/high_precision_decimal.h" #include "utils/UnitTest/Test.h" @@ -344,34 +345,26 @@ TEST(LlvmLibcHighPrecisionDecimalTest, RoundingTest) { EXPECT_EQ(hpd.round_to_integer_type<uint32_t>(), uint32_t(1)); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(1)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), __uint128_t(1)); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(1)); hpd.shift(1); // shift left 1 to get 2.469 (rounds to 2) EXPECT_EQ(hpd.round_to_integer_type<uint32_t>(), uint32_t(2)); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(2)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), __uint128_t(2)); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(2)); hpd.shift(1); // shift left 1 to get 4.938 (rounds to 5) EXPECT_EQ(hpd.round_to_integer_type<uint32_t>(), uint32_t(5)); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(5)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), __uint128_t(5)); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(5)); // 2.5 is right between two integers, so we round to even (2) hpd = __llvm_libc::internal::HighPrecisionDecimal("2.5"); EXPECT_EQ(hpd.round_to_integer_type<uint32_t>(), uint32_t(2)); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(2)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), __uint128_t(2)); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(2)); // unless it's marked as having truncated, which means it's actually slightly // higher, forcing a round up (3) @@ -379,9 +372,7 @@ TEST(LlvmLibcHighPrecisionDecimalTest, RoundingTest) { EXPECT_EQ(hpd.round_to_integer_type<uint32_t>(), uint32_t(3)); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(3)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), __uint128_t(3)); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(3)); // Check that the larger int types are being handled properly (overflow is not // handled, so int types that are too small are ignored for this test.)
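All of the rounding groups in this test exercise the same rule: round to nearest, ties to the even integer, unless the decimal was marked as truncated, in which case the true value sits just above the printed digits and must round up. A standalone sketch of that rule (`round_half_even` is a hypothetical helper, not libc code):

```cpp
#include <cassert>
#include <cstdint>

// Round-to-nearest with ties-to-even, as exercised by the tests above:
// `truncated` marks a value known to be slightly above its printed digits.
uint64_t round_half_even(uint64_t int_part, bool frac_is_half,
                         bool frac_above_half, bool truncated) {
  if (frac_above_half || (frac_is_half && truncated))
    return int_part + 1; // past the midpoint: round up
  if (frac_is_half)
    return int_part + (int_part & 1); // exact tie: pick the even neighbor
  return int_part; // below the midpoint: round down
}

int main() {
  assert(round_half_even(2, true, false, false) == 2); // "2.5" rounds to 2
  assert(round_half_even(2, true, false, true) == 3);  // truncated "2.5" to 3
  assert(round_half_even(3, true, false, false) == 4); // "3.5" rounds to 4
  return 0;
}
```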
@@ -390,16 +381,13 @@ TEST(LlvmLibcHighPrecisionDecimalTest, RoundingTest) { hpd = __llvm_libc::internal::HighPrecisionDecimal("1099511627776"); EXPECT_EQ(hpd.round_to_integer_type<uint64_t>(), uint64_t(1099511627776)); -#ifdef __SIZEOF_INT128__ - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), - __uint128_t(1099511627776)); + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), UInt128(1099511627776)); // 1267650600228229401496703205376 = 2^100 hpd = __llvm_libc::internal::HighPrecisionDecimal( "1267650600228229401496703205376"); - __uint128_t result = __uint128_t(1) << 100; + UInt128 result = UInt128(1) << 100; - EXPECT_EQ(hpd.round_to_integer_type<__uint128_t>(), result); -#endif + EXPECT_EQ(hpd.round_to_integer_type<UInt128>(), result); } diff --git a/libc/test/src/__support/str_to_float_test.cpp b/libc/test/src/__support/str_to_float_test.cpp index 211e3dc04eebd5d0b0fc15c680251ce78db6b465..82e87bc8251c99c1f40920ff956fad8c2bd9f159 100644 --- a/libc/test/src/__support/str_to_float_test.cpp +++ b/libc/test/src/__support/str_to_float_test.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/str_to_float.h" @@ -273,14 +274,14 @@ TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat80Simple) { } TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat80LongerMantissa) { - eisel_lemire_test<long double>((__uint128_t(0x1234567812345678) << 64) + - __uint128_t(0x1234567812345678), + eisel_lemire_test<long double>((UInt128(0x1234567812345678) << 64) + + UInt128(0x1234567812345678), 0, 0x91a2b3c091a2b3c1, 16507); - eisel_lemire_test<long double>((__uint128_t(0x1234567812345678) << 64) + - __uint128_t(0x1234567812345678), + eisel_lemire_test<long double>((UInt128(0x1234567812345678) << 64) + + UInt128(0x1234567812345678), 300, 0xd97757de56adb65c, 17503); - eisel_lemire_test<long double>((__uint128_t(0x1234567812345678) << 64) + - __uint128_t(0x1234567812345678), + eisel_lemire_test<long double>((UInt128(0x1234567812345678) << 64) + + UInt128(0x1234567812345678), -300, 0xc30feb9a7618457d, 15510); } @@ -299,7 +300,7 @@ TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat80TableLimits) { TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat80Fallback) { uint32_t outputExp2 = 0; - __uint128_t quadOutputMantissa = 0; + UInt128 quadOutputMantissa = 0; // This number is halfway between two possible results, and the algorithm // can't determine which is correct.
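The expected values in these hunks are 128-bit patterns spelled as `(UInt128(high) << 64) + UInt128(low)`, since C++ has no 128-bit integer literals. A short sketch (assuming a compiler that provides `__uint128_t`) confirming the two halves land where the tests expect:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  using UInt128 = __uint128_t; // assumption: builtin 128-bit type available

  // The repeated-halves mantissa used by the LongerMantissa tests.
  const uint64_t half = 0x1234567812345678;
  const UInt128 mantissa = (UInt128(half) << 64) + UInt128(half);

  // Each 64-bit half round-trips unchanged.
  assert(static_cast<uint64_t>(mantissa >> 64) == half);
  assert(static_cast<uint64_t>(mantissa) == half);
  return 0;
}
```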
@@ -313,39 +314,33 @@ TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat80Fallback) { ASSERT_FALSE(__llvm_libc::internal::eisel_lemire<long double>( 1, -1000, &quadOutputMantissa, &outputExp2)); } -#elif defined(__SIZEOF_INT128__) +#else // Quad precision long double TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat128Simple) { - eisel_lemire_test<long double>(123, 0, (__uint128_t(0x1ec0000000000) << 64), + eisel_lemire_test<long double>(123, 0, (UInt128(0x1ec0000000000) << 64), 16389); - eisel_lemire_test<long double>(12345678901234568192u, 0, - (__uint128_t(0x156a95319d63e) << 64) + - __uint128_t(0x1800000000000000), - 16446); + eisel_lemire_test<long double>( + 12345678901234568192u, 0, + (UInt128(0x156a95319d63e) << 64) + UInt128(0x1800000000000000), 16446); } TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat128LongerMantissa) { eisel_lemire_test<long double>( - (__uint128_t(0x1234567812345678) << 64) + __uint128_t(0x1234567812345678), - 0, (__uint128_t(0x1234567812345) << 64) + __uint128_t(0x6781234567812345), - 16507); + (UInt128(0x1234567812345678) << 64) + UInt128(0x1234567812345678), 0, + (UInt128(0x1234567812345) << 64) + UInt128(0x6781234567812345), 16507); eisel_lemire_test<long double>( - (__uint128_t(0x1234567812345678) << 64) + __uint128_t(0x1234567812345678), - 300, - (__uint128_t(0x1b2eeafbcad5b) << 64) + __uint128_t(0x6cb8b4451dfcde19), - 17503); + (UInt128(0x1234567812345678) << 64) + UInt128(0x1234567812345678), 300, + (UInt128(0x1b2eeafbcad5b) << 64) + UInt128(0x6cb8b4451dfcde19), 17503); eisel_lemire_test<long double>( - (__uint128_t(0x1234567812345678) << 64) + __uint128_t(0x1234567812345678), - -300, - (__uint128_t(0x1861fd734ec30) << 64) + __uint128_t(0x8afa7189f0f7595f), - 15510); + (UInt128(0x1234567812345678) << 64) + UInt128(0x1234567812345678), -300, + (UInt128(0x1861fd734ec30) << 64) + UInt128(0x8afa7189f0f7595f), 15510); } TEST_F(LlvmLibcStrToFloatTest, EiselLemireFloat128Fallback) { uint32_t outputExp2 = 0; - __uint128_t quadOutputMantissa = 0; + UInt128 quadOutputMantissa = 0; ASSERT_FALSE(__llvm_libc::internal::eisel_lemire<long double>( - (__uint128_t(0x5ce0e9a56015fec5) << 64) + __uint128_t(0xaadfa328ae39b333), - 1, &quadOutputMantissa, &outputExp2)); + (UInt128(0x5ce0e9a56015fec5) << 64) + UInt128(0xaadfa328ae39b333), 1, + &quadOutputMantissa, &outputExp2)); } #endif diff --git a/libc/test/src/__support/uint128_test.cpp b/libc/test/src/__support/uint128_test.cpp index c13b2be2d24f48441f06046b127f29d336faa14f..7cb405c3de87f06e52fbb048b15889968250d719 100644 --- a/libc/test/src/__support/uint128_test.cpp +++ b/libc/test/src/__support/uint128_test.cpp @@ -10,56 +10,59 @@ #include "utils/UnitTest/Test.h" -using UInt128 = __llvm_libc::cpp::UInt<128>; +// We want to test __llvm_libc::cpp::UInt<128> explicitly. So, for convenience, +// we use a sugar which does not conflict with the UInt128 type which can +// resolve to __uint128_t if the platform has it.
+using LL_UInt128 = __llvm_libc::cpp::UInt<128>; TEST(LlvmLibcUInt128ClassTest, BasicInit) { - UInt128 empty; - UInt128 half_val(12345); - UInt128 full_val({12345, 67890}); + LL_UInt128 empty; + LL_UInt128 half_val(12345); + LL_UInt128 full_val({12345, 67890}); ASSERT_TRUE(half_val != full_val); } TEST(LlvmLibcUInt128ClassTest, AdditionTests) { - UInt128 val1(12345); - UInt128 val2(54321); - UInt128 result1(66666); + LL_UInt128 val1(12345); + LL_UInt128 val2(54321); + LL_UInt128 result1(66666); EXPECT_EQ(val1 + val2, result1); EXPECT_EQ((val1 + val2), (val2 + val1)); // addition is commutative // Test overflow - UInt128 val3({0xf000000000000001, 0}); - UInt128 val4({0x100000000000000f, 0}); - UInt128 result2({0x10, 0x1}); + LL_UInt128 val3({0xf000000000000001, 0}); + LL_UInt128 val4({0x100000000000000f, 0}); + LL_UInt128 result2({0x10, 0x1}); EXPECT_EQ(val3 + val4, result2); EXPECT_EQ(val3 + val4, val4 + val3); } TEST(LlvmLibcUInt128ClassTest, MultiplicationTests) { - UInt128 val1({5, 0}); - UInt128 val2({10, 0}); - UInt128 result1({50, 0}); + LL_UInt128 val1({5, 0}); + LL_UInt128 val2({10, 0}); + LL_UInt128 result1({50, 0}); EXPECT_EQ((val1 * val2), result1); EXPECT_EQ((val1 * val2), (val2 * val1)); // multiplication is commutative // Check that the multiplication works across the whole number - UInt128 val3({0xf, 0}); - UInt128 val4({0x1111111111111111, 0x1111111111111111}); - UInt128 result2({0xffffffffffffffff, 0xffffffffffffffff}); + LL_UInt128 val3({0xf, 0}); + LL_UInt128 val4({0x1111111111111111, 0x1111111111111111}); + LL_UInt128 result2({0xffffffffffffffff, 0xffffffffffffffff}); EXPECT_EQ((val3 * val4), result2); EXPECT_EQ((val3 * val4), (val4 * val3)); // Check that multiplication doesn't reorder the bits. - UInt128 val5({2, 0}); - UInt128 val6({0x1357024675316420, 0x0123456776543210}); - UInt128 result3({0x26ae048cea62c840, 0x02468aceeca86420}); + LL_UInt128 val5({2, 0}); + LL_UInt128 val6({0x1357024675316420, 0x0123456776543210}); + LL_UInt128 result3({0x26ae048cea62c840, 0x02468aceeca86420}); EXPECT_EQ((val5 * val6), result3); EXPECT_EQ((val5 * val6), (val6 * val5)); // Make sure that multiplication handles overflow correctly. - UInt128 val7(2); - UInt128 val8({0x8000800080008000, 0x8000800080008000}); - UInt128 result4({0x0001000100010000, 0x0001000100010001}); + LL_UInt128 val7(2); + LL_UInt128 val8({0x8000800080008000, 0x8000800080008000}); + LL_UInt128 result4({0x0001000100010000, 0x0001000100010001}); EXPECT_EQ((val7 * val8), result4); EXPECT_EQ((val7 * val8), (val8 * val7)); @@ -67,92 +70,154 @@ TEST(LlvmLibcUInt128ClassTest, MultiplicationTests) { // 1e-60. They almost cancel on the high bits, but the result we're looking // for is just the low bits.
The full result would be // 0x7fffffffffffffffffffffffffffffff3a4f32d17f40d08f917cf11d1e039c50 - UInt128 val9({0x01D762422C946590, 0x9F4F2726179A2245}); - UInt128 val10({0x3792F412CB06794D, 0xCDB02555653131B6}); - UInt128 result5({0x917cf11d1e039c50, 0x3a4f32d17f40d08f}); + LL_UInt128 val9({0x01D762422C946590, 0x9F4F2726179A2245}); + LL_UInt128 val10({0x3792F412CB06794D, 0xCDB02555653131B6}); + LL_UInt128 result5({0x917cf11d1e039c50, 0x3a4f32d17f40d08f}); EXPECT_EQ((val9 * val10), result5); EXPECT_EQ((val9 * val10), (val10 * val9)); } TEST(LlvmLibcUInt128ClassTest, ShiftLeftTests) { - UInt128 val1(0x0123456789abcdef); - UInt128 result1(0x123456789abcdef0); + LL_UInt128 val1(0x0123456789abcdef); + LL_UInt128 result1(0x123456789abcdef0); EXPECT_EQ((val1 << 4), result1); - UInt128 val2({0x13579bdf02468ace, 0x123456789abcdef0}); - UInt128 result2({0x02468ace00000000, 0x9abcdef013579bdf}); + LL_UInt128 val2({0x13579bdf02468ace, 0x123456789abcdef0}); + LL_UInt128 result2({0x02468ace00000000, 0x9abcdef013579bdf}); EXPECT_EQ((val2 << 32), result2); + LL_UInt128 val22 = val2; + val22 <<= 32; + EXPECT_EQ(val22, result2); - UInt128 result3({0, 0x13579bdf02468ace}); + LL_UInt128 result3({0, 0x13579bdf02468ace}); EXPECT_EQ((val2 << 64), result3); - UInt128 result4({0, 0x02468ace00000000}); + LL_UInt128 result4({0, 0x02468ace00000000}); EXPECT_EQ((val2 << 96), result4); - UInt128 result5({0, 0x2468ace000000000}); + LL_UInt128 result5({0, 0x2468ace000000000}); EXPECT_EQ((val2 << 100), result5); - UInt128 result6({0, 0}); + LL_UInt128 result6({0, 0}); EXPECT_EQ((val2 << 128), result6); EXPECT_EQ((val2 << 256), result6); } TEST(LlvmLibcUInt128ClassTest, ShiftRightTests) { - UInt128 val1(0x0123456789abcdef); - UInt128 result1(0x00123456789abcde); + LL_UInt128 val1(0x0123456789abcdef); + LL_UInt128 result1(0x00123456789abcde); EXPECT_EQ((val1 >> 4), result1); - UInt128 val2({0x13579bdf02468ace, 0x123456789abcdef0}); - UInt128 result2({0x9abcdef013579bdf, 0x0000000012345678}); + LL_UInt128 val2({0x13579bdf02468ace, 0x123456789abcdef0}); + LL_UInt128 result2({0x9abcdef013579bdf, 0x0000000012345678}); EXPECT_EQ((val2 >> 32), result2); + LL_UInt128 val22 = val2; + val22 >>= 32; + EXPECT_EQ(val22, result2); - UInt128 result3({0x123456789abcdef0, 0}); + LL_UInt128 result3({0x123456789abcdef0, 0}); EXPECT_EQ((val2 >> 64), result3); - UInt128 result4({0x0000000012345678, 0}); + LL_UInt128 result4({0x0000000012345678, 0}); EXPECT_EQ((val2 >> 96), result4); - UInt128 result5({0x0000000001234567, 0}); + LL_UInt128 result5({0x0000000001234567, 0}); EXPECT_EQ((val2 >> 100), result5); - UInt128 result6({0, 0}); + LL_UInt128 result6({0, 0}); EXPECT_EQ((val2 >> 128), result6); EXPECT_EQ((val2 >> 256), result6); } TEST(LlvmLibcUInt128ClassTest, AndTests) { - UInt128 base({0xffff00000000ffff, 0xffffffff00000000}); - UInt128 val128({0xf0f0f0f00f0f0f0f, 0xff00ff0000ff00ff}); + LL_UInt128 base({0xffff00000000ffff, 0xffffffff00000000}); + LL_UInt128 val128({0xf0f0f0f00f0f0f0f, 0xff00ff0000ff00ff}); uint64_t val64 = 0xf0f0f0f00f0f0f0f; int val32 = 0x0f0f0f0f; - UInt128 result128({0xf0f0000000000f0f, 0xff00ff0000000000}); - UInt128 result64(0xf0f0000000000f0f); - UInt128 result32(0x00000f0f); + LL_UInt128 result128({0xf0f0000000000f0f, 0xff00ff0000000000}); + LL_UInt128 result64(0xf0f0000000000f0f); + LL_UInt128 result32(0x00000f0f); EXPECT_EQ((base & val128), result128); EXPECT_EQ((base & val64), result64); EXPECT_EQ((base & val32), result32); } TEST(LlvmLibcUInt128ClassTest, OrTests) { - UInt128 base({0xffff00000000ffff, 
0xffffffff00000000}); - UInt128 val128({0xf0f0f0f00f0f0f0f, 0xff00ff0000ff00ff}); + LL_UInt128 base({0xffff00000000ffff, 0xffffffff00000000}); + LL_UInt128 val128({0xf0f0f0f00f0f0f0f, 0xff00ff0000ff00ff}); uint64_t val64 = 0xf0f0f0f00f0f0f0f; int val32 = 0x0f0f0f0f; - UInt128 result128({0xfffff0f00f0fffff, 0xffffffff00ff00ff}); - UInt128 result64({0xfffff0f00f0fffff, 0xffffffff00000000}); - UInt128 result32({0xffff00000f0fffff, 0xffffffff00000000}); + LL_UInt128 result128({0xfffff0f00f0fffff, 0xffffffff00ff00ff}); + LL_UInt128 result64({0xfffff0f00f0fffff, 0xffffffff00000000}); + LL_UInt128 result32({0xffff00000f0fffff, 0xffffffff00000000}); EXPECT_EQ((base | val128), result128); EXPECT_EQ((base | val64), result64); EXPECT_EQ((base | val32), result32); } +TEST(LlvmLibcUInt128ClassTest, CompoundAssignments) { + LL_UInt128 x({0xffff00000000ffff, 0xffffffff00000000}); + LL_UInt128 b({0xf0f0f0f00f0f0f0f, 0xff00ff0000ff00ff}); + + LL_UInt128 a = x; + a |= b; + LL_UInt128 or_result({0xfffff0f00f0fffff, 0xffffffff00ff00ff}); + EXPECT_EQ(a, or_result); + + a = x; + a &= b; + LL_UInt128 and_result({0xf0f0000000000f0f, 0xff00ff0000000000}); + EXPECT_EQ(a, and_result); + + a = x; + a ^= b; + LL_UInt128 xor_result({0x0f0ff0f00f0ff0f0, 0x00ff00ff00ff00ff}); + EXPECT_EQ(a, xor_result); + + a = LL_UInt128(uint64_t(0x0123456789abcdef)); + LL_UInt128 shift_left_result(uint64_t(0x123456789abcdef0)); + a <<= 4; + EXPECT_EQ(a, shift_left_result); + + a = LL_UInt128(uint64_t(0x123456789abcdef1)); + LL_UInt128 shift_right_result(uint64_t(0x0123456789abcdef)); + a >>= 4; + EXPECT_EQ(a, shift_right_result); + + a = LL_UInt128({0xf000000000000001, 0}); + b = LL_UInt128({0x100000000000000f, 0}); + LL_UInt128 add_result({0x10, 0x1}); + a += b; + EXPECT_EQ(a, add_result); + + a = LL_UInt128({0xf, 0}); + b = LL_UInt128({0x1111111111111111, 0x1111111111111111}); + LL_UInt128 mul_result({0xffffffffffffffff, 0xffffffffffffffff}); + a *= b; + EXPECT_EQ(a, mul_result); +} + +TEST(LlvmLibcUInt128ClassTest, UnaryPredecrement) { + LL_UInt128 a = LL_UInt128({0x1111111111111111, 0x1111111111111111}); + ++a; + EXPECT_EQ(a, LL_UInt128({0x1111111111111112, 0x1111111111111111})); + + a = LL_UInt128({0xffffffffffffffff, 0x0}); + ++a; + EXPECT_EQ(a, LL_UInt128({0x0, 0x1})); + + a = LL_UInt128({0xffffffffffffffff, 0xffffffffffffffff}); + ++a; + EXPECT_EQ(a, LL_UInt128({0x0, 0x0})); +} + TEST(LlvmLibcUInt128ClassTest, EqualsTests) { - UInt128 a1({0xffffffff00000000, 0xffff00000000ffff}); - UInt128 a2({0xffffffff00000000, 0xffff00000000ffff}); - UInt128 b({0xff00ff0000ff00ff, 0xf0f0f0f00f0f0f0f}); - UInt128 a_reversed({0xffff00000000ffff, 0xffffffff00000000}); - UInt128 a_upper(0xffff00000000ffff); - UInt128 a_lower(0xffffffff00000000); + LL_UInt128 a1({0xffffffff00000000, 0xffff00000000ffff}); + LL_UInt128 a2({0xffffffff00000000, 0xffff00000000ffff}); + LL_UInt128 b({0xff00ff0000ff00ff, 0xf0f0f0f00f0f0f0f}); + LL_UInt128 a_reversed({0xffff00000000ffff, 0xffffffff00000000}); + LL_UInt128 a_upper(0xffff00000000ffff); + LL_UInt128 a_lower(0xffffffff00000000); ASSERT_TRUE(a1 == a1); ASSERT_TRUE(a1 == a2); ASSERT_FALSE(a1 == b); @@ -163,15 +228,15 @@ TEST(LlvmLibcUInt128ClassTest, EqualsTests) { } TEST(LlvmLibcUInt128ClassTest, ComparisonTests) { - UInt128 a({0xffffffff00000000, 0xffff00000000ffff}); - UInt128 b({0xff00ff0000ff00ff, 0xf0f0f0f00f0f0f0f}); + LL_UInt128 a({0xffffffff00000000, 0xffff00000000ffff}); + LL_UInt128 b({0xff00ff0000ff00ff, 0xf0f0f0f00f0f0f0f}); EXPECT_GT(a, b); EXPECT_GE(a, b); EXPECT_LT(b, a); EXPECT_LE(b, a); - 
UInt128 x(0xffffffff00000000); - UInt128 y(0x00000000ffffffff); + LL_UInt128 x(0xffffffff00000000); + LL_UInt128 y(0x00000000ffffffff); EXPECT_GT(x, y); EXPECT_GE(x, y); EXPECT_LT(y, x); diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt index ddc02b2fc7786481cbaee607ef833cb087f9d543..971a73d3d84b036bd2ca342c399d761e81e549ec 100644 --- a/libc/test/src/math/CMakeLists.txt +++ b/libc/test/src/math/CMakeLists.txt @@ -1271,6 +1271,34 @@ add_fp_unittest( libc.src.__support.FPUtil.fputil ) +add_fp_unittest( + fmodf_test + SUITE + libc_math_unittests + SRCS + fmodf_test.cpp + HDRS + FModTest.h + DEPENDS + libc.include.math + libc.src.math.fmodf + libc.src.__support.FPUtil.fputil +) + +add_fp_unittest( + fmod_test + SUITE + libc_math_unittests + SRCS + fmod_test.cpp + HDRS + FModTest.h + DEPENDS + libc.include.math + libc.src.math.fmod + libc.src.__support.FPUtil.fputil +) + add_subdirectory(generic) add_subdirectory(exhaustive) add_subdirectory(differential_testing) diff --git a/libc/test/src/math/FModTest.h b/libc/test/src/math/FModTest.h new file mode 100644 index 0000000000000000000000000000000000000000..4962194d939f429168335617ad24e0534ac6807f --- /dev/null +++ b/libc/test/src/math/FModTest.h @@ -0,0 +1,270 @@ +//===-- Utility class to test fmod special numbers ------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_TEST_SRC_MATH_FMODTEST_H +#define LLVM_LIBC_TEST_SRC_MATH_FMODTEST_H + +#include "src/__support/FPUtil/BasicOperations.h" +#include "src/__support/FPUtil/NearestIntegerOperations.h" +#include "utils/UnitTest/FPMatcher.h" +#include "utils/UnitTest/Test.h" + +#include <errno.h> +#include <limits> + +#define TEST_SPECIAL(x, y, expected, dom_err, expected_exception) \ EXPECT_FP_EQ(expected, f(x, y)); \ EXPECT_MATH_ERRNO((dom_err) ? EDOM : 0); \ EXPECT_FP_EXCEPTION(expected_exception); \ __llvm_libc::fputil::clear_except(FE_ALL_EXCEPT) + +#define TEST_REGULAR(x, y, expected) TEST_SPECIAL(x, y, expected, false, 0) + +template <typename T> class FmodTest : public __llvm_libc::testing::Test { + + DECLARE_SPECIAL_CONSTANTS(T) + +public: + typedef T (*FModFunc)(T, T); + + void testSpecialNumbers(FModFunc f) { + using nl = std::numeric_limits<T>; + + // fmod (+0, y) == +0 for y != 0. + TEST_SPECIAL(0.0, 3.0, 0.0, false, 0); + TEST_SPECIAL(0.0, nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(0.0, -nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(0.0, nl::min(), 0.0, false, 0); + TEST_SPECIAL(0.0, -nl::min(), 0.0, false, 0); + TEST_SPECIAL(0.0, nl::max(), 0.0, false, 0); + TEST_SPECIAL(0.0, -nl::max(), 0.0, false, 0); + + // fmod (-0, y) == -0 for y != 0. + TEST_SPECIAL(neg_zero, 3.0, neg_zero, false, 0); + TEST_SPECIAL(neg_zero, nl::denorm_min(), neg_zero, false, 0); + TEST_SPECIAL(neg_zero, -nl::denorm_min(), neg_zero, false, 0); + TEST_SPECIAL(neg_zero, nl::min(), neg_zero, false, 0); + TEST_SPECIAL(neg_zero, -nl::min(), neg_zero, false, 0); + TEST_SPECIAL(neg_zero, nl::max(), neg_zero, false, 0); + TEST_SPECIAL(neg_zero, -nl::max(), neg_zero, false, 0); + + // fmod (+inf, y) == nl::quiet_NaN() plus invalid exception.
+ TEST_SPECIAL(inf, 3.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, -1.1L, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, nl::denorm_min(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, nl::min(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, nl::max(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, inf, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(inf, neg_inf, nl::quiet_NaN(), true, FE_INVALID); + + // fmod (-inf, y) == nl::quiet_NaN() plus invalid exception. + TEST_SPECIAL(neg_inf, 3.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, -1.1L, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, nl::denorm_min(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, nl::min(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, nl::max(), nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, inf, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_inf, neg_inf, nl::quiet_NaN(), true, FE_INVALID); + + // fmod (x, +0) == nl::quiet_NaN() plus invalid exception. + TEST_SPECIAL(3.0, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(-1.1L, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(0.0, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_zero, 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::denorm_min(), 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::min(), 0.0, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::max(), 0.0, nl::quiet_NaN(), true, FE_INVALID); + + // fmod (x, -0) == nl::quiet_NaN() plus invalid exception. + TEST_SPECIAL(3.0, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(-1.1L, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(0.0, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(neg_zero, neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::denorm_min(), neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::min(), neg_zero, nl::quiet_NaN(), true, FE_INVALID); + TEST_SPECIAL(nl::max(), neg_zero, nl::quiet_NaN(), true, FE_INVALID); + + // fmod (x, +inf) == x for x not infinite. + TEST_SPECIAL(0.0, inf, 0.0, false, 0); + TEST_SPECIAL(neg_zero, inf, neg_zero, false, 0); + TEST_SPECIAL(nl::denorm_min(), inf, nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::min(), inf, nl::min(), false, 0); + TEST_SPECIAL(nl::max(), inf, nl::max(), false, 0); + TEST_SPECIAL(3.0, inf, 3.0, false, 0); + // fmod (x, -inf) == x for x not infinite. 
+ TEST_SPECIAL(0.0, neg_inf, 0.0, false, 0); + TEST_SPECIAL(neg_zero, neg_inf, neg_zero, false, 0); + TEST_SPECIAL(nl::denorm_min(), neg_inf, nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::min(), neg_inf, nl::min(), false, 0); + TEST_SPECIAL(nl::max(), neg_inf, nl::max(), false, 0); + TEST_SPECIAL(3.0, neg_inf, 3.0, false, 0); + + TEST_SPECIAL(0.0, nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(0.0, -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(neg_zero, nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(neg_zero, -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(1.0, nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(1.0, -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(inf, nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(inf, -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(neg_inf, nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(neg_inf, -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(0.0, nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(0.0, -nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(neg_zero, nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(neg_zero, -nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(1.0, nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(1.0, -nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(inf, nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(inf, -nl::signaling_NaN(), nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(neg_inf, nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(neg_inf, -nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::quiet_NaN(), 0.0, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), 0.0, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), neg_zero, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), neg_zero, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), 1.0, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), 1.0, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), inf, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), inf, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), neg_inf, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), neg_inf, nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::signaling_NaN(), 0.0, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), 0.0, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), neg_zero, nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), neg_zero, nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), 1.0, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), 1.0, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), inf, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), inf, nl::quiet_NaN(), false, FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), neg_inf, nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), neg_inf, nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::quiet_NaN(), nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(-nl::quiet_NaN(), nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + 
TEST_SPECIAL(-nl::quiet_NaN(), -nl::quiet_NaN(), nl::quiet_NaN(), false, 0); + TEST_SPECIAL(nl::quiet_NaN(), nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::quiet_NaN(), -nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::quiet_NaN(), nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::quiet_NaN(), -nl::signaling_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), nl::quiet_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), -nl::quiet_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), nl::quiet_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), -nl::quiet_NaN(), nl::quiet_NaN(), false, + FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), nl::signaling_NaN(), nl::quiet_NaN(), + false, FE_INVALID); + TEST_SPECIAL(nl::signaling_NaN(), -nl::signaling_NaN(), nl::quiet_NaN(), + false, FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), nl::signaling_NaN(), nl::quiet_NaN(), + false, FE_INVALID); + TEST_SPECIAL(-nl::signaling_NaN(), -nl::signaling_NaN(), nl::quiet_NaN(), + false, FE_INVALID); + + TEST_SPECIAL(6.5, 2.25L, 2.0L, false, 0); + TEST_SPECIAL(-6.5, 2.25L, -2.0L, false, 0); + TEST_SPECIAL(6.5, -2.25L, 2.0L, false, 0); + TEST_SPECIAL(-6.5, -2.25L, -2.0L, false, 0); + + TEST_SPECIAL(nl::max(), nl::max(), 0.0, false, 0); + TEST_SPECIAL(nl::max(), -nl::max(), 0.0, false, 0); + TEST_SPECIAL(nl::max(), nl::min(), 0.0, false, 0); + TEST_SPECIAL(nl::max(), -nl::min(), 0.0, false, 0); + TEST_SPECIAL(nl::max(), nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(nl::max(), -nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(-nl::max(), nl::max(), neg_zero, false, 0); + TEST_SPECIAL(-nl::max(), -nl::max(), neg_zero, false, 0); + TEST_SPECIAL(-nl::max(), nl::min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::max(), -nl::min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::max(), nl::denorm_min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::max(), -nl::denorm_min(), neg_zero, false, 0); + + TEST_SPECIAL(nl::min(), nl::max(), nl::min(), false, 0); + TEST_SPECIAL(nl::min(), -nl::max(), nl::min(), false, 0); + TEST_SPECIAL(nl::min(), nl::min(), 0.0, false, 0); + TEST_SPECIAL(nl::min(), -nl::min(), 0.0, false, 0); + TEST_SPECIAL(nl::min(), nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(nl::min(), -nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(-nl::min(), nl::max(), -nl::min(), false, 0); + TEST_SPECIAL(-nl::min(), -nl::max(), -nl::min(), false, 0); + TEST_SPECIAL(-nl::min(), nl::min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::min(), -nl::min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::min(), nl::denorm_min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::min(), -nl::denorm_min(), neg_zero, false, 0); + + TEST_SPECIAL(nl::denorm_min(), nl::max(), nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::denorm_min(), -nl::max(), nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::denorm_min(), nl::min(), nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::denorm_min(), -nl::min(), nl::denorm_min(), false, 0); + TEST_SPECIAL(nl::denorm_min(), nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(nl::denorm_min(), -nl::denorm_min(), 0.0, false, 0); + TEST_SPECIAL(-nl::denorm_min(), nl::max(), -nl::denorm_min(), false, 0); + TEST_SPECIAL(-nl::denorm_min(), -nl::max(), -nl::denorm_min(), false, 0); + TEST_SPECIAL(-nl::denorm_min(), nl::min(), -nl::denorm_min(), false, 0); + TEST_SPECIAL(-nl::denorm_min(), -nl::min(), -nl::denorm_min(), false, 
0); + TEST_SPECIAL(-nl::denorm_min(), nl::denorm_min(), neg_zero, false, 0); + TEST_SPECIAL(-nl::denorm_min(), -nl::denorm_min(), neg_zero, false, 0); + } + + void testRegularExtreme(FModFunc f) { + + TEST_REGULAR(0x1p127L, 0x3p-149L, 0x1p-149L); + TEST_REGULAR(0x1p127L, -0x3p-149L, 0x1p-149L); + TEST_REGULAR(0x1p127L, 0x3p-148L, 0x1p-147L); + TEST_REGULAR(0x1p127L, -0x3p-148L, 0x1p-147L); + TEST_REGULAR(0x1p127L, 0x3p-126L, 0x1p-125L); + TEST_REGULAR(0x1p127L, -0x3p-126L, 0x1p-125L); + TEST_REGULAR(-0x1p127L, 0x3p-149L, -0x1p-149L); + TEST_REGULAR(-0x1p127L, -0x3p-149L, -0x1p-149L); + TEST_REGULAR(-0x1p127L, 0x3p-148L, -0x1p-147L); + TEST_REGULAR(-0x1p127L, -0x3p-148L, -0x1p-147L); + TEST_REGULAR(-0x1p127L, 0x3p-126L, -0x1p-125L); + TEST_REGULAR(-0x1p127L, -0x3p-126L, -0x1p-125L); + + if constexpr (sizeof(T) >= sizeof(double)) { + TEST_REGULAR(0x1p1023L, 0x3p-1074L, 0x1p-1073L); + TEST_REGULAR(0x1p1023L, -0x3p-1074L, 0x1p-1073L); + TEST_REGULAR(0x1p1023L, 0x3p-1073L, 0x1p-1073L); + TEST_REGULAR(0x1p1023L, -0x3p-1073L, 0x1p-1073L); + TEST_REGULAR(0x1p1023L, 0x3p-1022L, 0x1p-1021L); + TEST_REGULAR(0x1p1023L, -0x3p-1022L, 0x1p-1021L); + TEST_REGULAR(-0x1p1023L, 0x3p-1074L, -0x1p-1073L); + TEST_REGULAR(-0x1p1023L, -0x3p-1074L, -0x1p-1073L); + TEST_REGULAR(-0x1p1023L, 0x3p-1073L, -0x1p-1073L); + TEST_REGULAR(-0x1p1023L, -0x3p-1073L, -0x1p-1073L); + TEST_REGULAR(-0x1p1023L, 0x3p-1022L, -0x1p-1021L); + TEST_REGULAR(-0x1p1023L, -0x3p-1022L, -0x1p-1021L); + } + } +}; + +#define LIST_FMOD_TESTS(T, func) \ + using LlvmLibcFmodTest = FmodTest<T>; \ + TEST_F(LlvmLibcFmodTest, SpecialNumbers) { testSpecialNumbers(&func); } \ + TEST_F(LlvmLibcFmodTest, RegularExtreme) { testRegularExtreme(&func); } + +#endif // LLVM_LIBC_TEST_SRC_MATH_FMODTEST_H diff --git a/libc/test/src/math/differential_testing/CMakeLists.txt b/libc/test/src/math/differential_testing/CMakeLists.txt index 208e9979a028ce21e63e9b4f419beeb9ce6d3403..7eb2fbf26a9763e84e28e40991b9e27ecf7aad92 100644 --- a/libc/test/src/math/differential_testing/CMakeLists.txt +++ b/libc/test/src/math/differential_testing/CMakeLists.txt @@ -470,3 +470,43 @@ add_diff_binary( COMPILE_OPTIONS -fno-builtin ) + +add_diff_binary( + fmodf_diff + SRCS + fmodf_diff.cpp + DEPENDS + .single_input_single_output_diff + libc.src.math.fmodf +) + +add_diff_binary( + fmodf_perf + SRCS + fmodf_perf.cpp + DEPENDS + .single_input_single_output_diff + libc.src.math.fmodf + COMPILE_OPTIONS + -fno-builtin +) + +add_diff_binary( + fmod_diff + SRCS + fmod_diff.cpp + DEPENDS + .single_input_single_output_diff + libc.src.math.fmod +) + +add_diff_binary( + fmod_perf + SRCS + fmod_perf.cpp + DEPENDS + .single_input_single_output_diff + libc.src.math.fmod + COMPILE_OPTIONS + -fno-builtin +) diff --git a/libc/test/src/math/differential_testing/fmod_diff.cpp b/libc/test/src/math/differential_testing/fmod_diff.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c20a7c5140f0091c9f4901549476bb735c1b8b09 --- /dev/null +++ b/libc/test/src/math/differential_testing/fmod_diff.cpp @@ -0,0 +1,15 @@ +//===-- Differential test for fmod ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "BinaryOpSingleOutputDiff.h" + +#include "src/math/fmod.h" + +#include <math.h> + +BINARY_OP_SINGLE_OUTPUT_DIFF(double, __llvm_libc::fmod, ::fmod, "fmod_diff.log") diff --git a/libc/test/src/math/differential_testing/fmod_perf.cpp b/libc/test/src/math/differential_testing/fmod_perf.cpp new file mode 100644 index 0000000000000000000000000000000000000000..37878bee99c3af1ea2a029413fe5d7f9e9608881 --- /dev/null +++ b/libc/test/src/math/differential_testing/fmod_perf.cpp @@ -0,0 +1,15 @@ +//===-- Differential test for fmod ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "BinaryOpSingleOutputDiff.h" + +#include "src/math/fmod.h" + +#include <math.h> + +BINARY_OP_SINGLE_OUTPUT_PERF(double, __llvm_libc::fmod, ::fmod, "fmod_perf.log") diff --git a/libc/test/src/math/differential_testing/fmodf_diff.cpp b/libc/test/src/math/differential_testing/fmodf_diff.cpp new file mode 100644 index 0000000000000000000000000000000000000000..634c6399877b0fc36aba52ec2fdc009176f85802 --- /dev/null +++ b/libc/test/src/math/differential_testing/fmodf_diff.cpp @@ -0,0 +1,16 @@ +//===-- Differential test for fmodf ---------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "BinaryOpSingleOutputDiff.h" + +#include "src/math/fmodf.h" + +#include <math.h> + +BINARY_OP_SINGLE_OUTPUT_DIFF(float, __llvm_libc::fmodf, ::fmodf, + "fmodf_diff.log") diff --git a/libc/test/src/math/differential_testing/fmodf_perf.cpp b/libc/test/src/math/differential_testing/fmodf_perf.cpp new file mode 100644 index 0000000000000000000000000000000000000000..36d0fe56d964afb185e507cbd2bac5a8d4d0b40f --- /dev/null +++ b/libc/test/src/math/differential_testing/fmodf_perf.cpp @@ -0,0 +1,16 @@ +//===-- Differential test for fmodf ---------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "BinaryOpSingleOutputDiff.h" + +#include "src/math/fmodf.h" + +#include <math.h> + +BINARY_OP_SINGLE_OUTPUT_PERF(float, __llvm_libc::fmodf, ::fmodf, + "fmodf_perf.log") diff --git a/libc/test/src/math/exhaustive/CMakeLists.txt b/libc/test/src/math/exhaustive/CMakeLists.txt index 34f8a38772570fde7c420281257998bb650122b8..a59724819ed55a09ce9b0934edea7d6470db2fb6 100644 --- a/libc/test/src/math/exhaustive/CMakeLists.txt +++ b/libc/test/src/math/exhaustive/CMakeLists.txt @@ -184,3 +184,16 @@ add_fp_unittest( LINK_LIBRARIES -lpthread ) + +add_fp_unittest( + fmod_generic_impl_test + NO_RUN_POSTBUILD + NEED_MPFR + SUITE + libc_math_exhaustive_tests + SRCS + fmod_generic_impl_test.cpp + DEPENDS + libc.src.__support.FPUtil.fputil + libc.src.__support.FPUtil.generic.fmod +) diff --git a/libc/test/src/math/exhaustive/fmod_generic_impl_test.cpp b/libc/test/src/math/exhaustive/fmod_generic_impl_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9af7e87ec8f07e73fd9e5b1fc043acec8976adc0 --- /dev/null +++ b/libc/test/src/math/exhaustive/fmod_generic_impl_test.cpp @@ -0,0 +1,78 @@ +//===-- Utility class to test FMod generic implementation -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "src/__support/CPP/TypeTraits.h" +#include "src/__support/FPUtil/generic/FMod.h" +#include "utils/MPFRWrapper/MPFRUtils.h" +#include "utils/UnitTest/FPMatcher.h" +#include "utils/UnitTest/Test.h" + +#include <array> +#include <limits> + +namespace mpfr = __llvm_libc::testing::mpfr; + +template <typename T, bool InverseMultiplication> +class LlvmLibcFModTest : public __llvm_libc::testing::Test { + + using DivisionHelper = __llvm_libc::cpp::ConditionalType< + InverseMultiplication, + __llvm_libc::fputil::generic::FModDivisionInvMultHelper<T>, + __llvm_libc::fputil::generic::FModDivisionSimpleHelper<T>>; + + static constexpr std::array<T, 11> test_bases = { + T(0.0), + T(1.0), + T(3.0), + T(27.0), + T(11.0 / 8.0), + T(2.764443), + T(1.0) - std::numeric_limits<T>::epsilon(), + T(1.0) + std::numeric_limits<T>::epsilon(), + T(M_PI), + T(M_SQRT2), + T(M_E)}; + +public: + void testExtensive() { + using FMod = __llvm_libc::fputil::generic::FMod< + T, __llvm_libc::fputil::generic::FModFastMathWrapper<T>, + DivisionHelper>; + using nl = std::numeric_limits<T>; + int min2 = nl::min_exponent - nl::digits - 5; + int max2 = nl::max_exponent + 3; + for (T by : test_bases) { + for (int iy = min2; iy < max2; iy++) { + T y = by * std::ldexp(2, iy); + if (y == 0 || !std::isfinite(y)) + continue; + for (T bx : test_bases) { + for (int ix = min2; ix < max2; ix++) { + T x = bx * std::ldexp(2, ix); + if (!std::isfinite(x)) + continue; + T result = FMod::eval(x, y); + mpfr::BinaryInput<T> input{x, y}; + EXPECT_MPFR_MATCH(mpfr::Operation::Fmod, input, result, 0.0); + } + } + } + } + } +}; + +using LlvmLibcFModFloatTest = LlvmLibcFModTest<float, false>; +TEST_F(LlvmLibcFModFloatTest, ExtensiveTest) { testExtensive(); } + +using LlvmLibcFModFloatInvTest = LlvmLibcFModTest<float, true>; +TEST_F(LlvmLibcFModFloatInvTest, ExtensiveTest) { testExtensive(); } + +using LlvmLibcFModDoubleTest = LlvmLibcFModTest<double, false>; +TEST_F(LlvmLibcFModDoubleTest, ExtensiveTest) { testExtensive(); } + +using LlvmLibcFModDoubleInvTest =
LlvmLibcFModTest<double, true>; +TEST_F(LlvmLibcFModDoubleInvTest, ExtensiveTest) { testExtensive(); } diff --git a/libc/test/src/math/fmod_test.cpp b/libc/test/src/math/fmod_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..03790e4941e19182b3bd7bf8fe6e37ee08db09de --- /dev/null +++ b/libc/test/src/math/fmod_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for fmod ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "FModTest.h" + +#include "src/math/fmod.h" + +LIST_FMOD_TESTS(double, __llvm_libc::fmod) diff --git a/libc/test/src/math/fmodf_test.cpp b/libc/test/src/math/fmodf_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2b13379eba25235af73a15177f0d379559851d89 --- /dev/null +++ b/libc/test/src/math/fmodf_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for fmodf -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "FModTest.h" + +#include "src/math/fmodf.h" + +LIST_FMOD_TESTS(float, __llvm_libc::fmodf) diff --git a/libc/test/src/pthread/CMakeLists.txt b/libc/test/src/pthread/CMakeLists.txt index f69c77fd4c3ec9512fb3bb3dda368e615b0a3e2d..db0516cecc4fdb5bf4bf7564aa058a0eefc82d5c 100644 --- a/libc/test/src/pthread/CMakeLists.txt +++ b/libc/test/src/pthread/CMakeLists.txt @@ -39,32 +39,3 @@ add_libc_unittest( libc.src.pthread.pthread_mutexattr_setrobust libc.src.pthread.pthread_mutexattr_settype ) - -add_libc_unittest( - pthread_mutex_test - SUITE - libc_pthread_unittests - SRCS - pthread_mutex_test.cpp - DEPENDS - libc.include.pthread - libc.src.errno.errno - libc.src.pthread.pthread_mutex_destroy - libc.src.pthread.pthread_mutex_init - libc.src.pthread.pthread_mutex_lock - libc.src.pthread.pthread_mutex_unlock - libc.src.pthread.pthread_create - libc.src.pthread.pthread_join -) - -add_libc_unittest( - pthread_test - SUITE - libc_pthread_unittests - SRCS - pthread_test.cpp - DEPENDS - libc.include.pthread - libc.src.pthread.pthread_create - libc.src.pthread.pthread_join -) diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt index 0cdb69bcc59d6f4baaddeb2f99299f32147e5f7b..065408908a10dd40a366c20d6d44008270f22233 100644 --- a/libc/test/src/stdlib/CMakeLists.txt +++ b/libc/test/src/stdlib/CMakeLists.txt @@ -77,6 +77,7 @@ add_libc_unittest( SRCS strtold_test.cpp DEPENDS + libc.src.__support.CPP.uint128 libc.src.stdlib.strtold ) diff --git a/libc/test/src/stdlib/strtold_test.cpp b/libc/test/src/stdlib/strtold_test.cpp index eb5cda72b90e4ea4bda981dec0a6fef9c0923aea..3ddcdc1b1064c9468233e5def6807850b4f04786 100644 --- a/libc/test/src/stdlib/strtold_test.cpp +++ b/libc/test/src/stdlib/strtold_test.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/CPP/UInt128.h" #include "src/__support/FPUtil/FPBits.h" #include "src/stdlib/strtold.h" @@ -30,7 +31,7 @@ public: const uint64_t expectedRawData, const int expectedErrno = 0) #else void run_test(const char
*inputString, const ptrdiff_t expectedStrLen, - const __uint128_t expectedRawData, const int expectedErrno = 0) + const UInt128 expectedRawData, const int expectedErrno = 0) #endif { // expectedRawData64 is the expected long double result as a uint64_t, @@ -46,7 +47,7 @@ public: // | // +-- 11 Exponent Bits - // expectedRawData80 is the expected long double result as a __uint128_t, + // expectedRawData80 is the expected long double result as a UInt128, // organized according to the x86 extended precision format: // // +-- 1 Sign Bit @@ -59,7 +60,7 @@ public: // | | // +-- 15 Exponent Bits +-- 63 Mantissa bits - // expectedRawData128 is the expected long double result as a __uint128_t, + // expectedRawData128 is the expected long double result as a UInt128, // organized according to IEEE754 quadruple precision format: // // +-- 1 Sign Bit +-- 112 Mantissa bits @@ -97,73 +98,67 @@ public: TEST_F(LlvmLibcStrToLDTest, SimpleTest) { run_test("123", 3, SELECT_CONST(uint64_t(0x405ec00000000000), - __uint128_t(0x4005f60000) << 40, - __uint128_t(0x4005ec0000000000) << 64)); + UInt128(0x4005f60000) << 40, + UInt128(0x4005ec0000000000) << 64)); // This should fail on Eisel-Lemire, forcing a fallback to simple decimal // conversion. run_test("12345678901234549760", 20, SELECT_CONST(uint64_t(0x43e56a95319d63d8), - (__uint128_t(0x403eab54a9) << 40) + - __uint128_t(0x8ceb1ec400), - (__uint128_t(0x403e56a95319d63d) << 64) + - __uint128_t(0x8800000000000000))); + (UInt128(0x403eab54a9) << 40) + UInt128(0x8ceb1ec400), + (UInt128(0x403e56a95319d63d) << 64) + + UInt128(0x8800000000000000))); // Found while looking for difficult test cases here: // https://github.com/nigeltao/parse-number-fxx-test-data/blob/main/more-test-cases/golang-org-issue-36657.txt run_test("1090544144181609348835077142190", 31, SELECT_CONST(uint64_t(0x462b8779f2474dfb), - (__uint128_t(0x4062dc3bcf) << 40) + - __uint128_t(0x923a6fd402), - (__uint128_t(0x4062b8779f2474df) << 64) + - __uint128_t(0xa804bfd8c6d5c000))); + (UInt128(0x4062dc3bcf) << 40) + UInt128(0x923a6fd402), + (UInt128(0x4062b8779f2474df) << 64) + + UInt128(0xa804bfd8c6d5c000))); run_test("0x123", 5, SELECT_CONST(uint64_t(0x4072300000000000), - (__uint128_t(0x4007918000) << 40), - (__uint128_t(0x4007230000000000) << 64))); + (UInt128(0x4007918000) << 40), + (UInt128(0x4007230000000000) << 64))); } // These are tests that have caused problems for doubles in the past. 
TEST_F(LlvmLibcStrToLDTest, Float64SpecificFailures) { run_test("3E70000000000000", 16, SELECT_CONST(uint64_t(0x7FF0000000000000), - (__uint128_t(0x7fff800000) << 40), - (__uint128_t(0x7fff000000000000) << 64)), + (UInt128(0x7fff800000) << 40), + (UInt128(0x7fff000000000000) << 64)), ERANGE); run_test("358416272e-33", 13, SELECT_CONST(uint64_t(0x3adbbb2a68c9d0b9), - (__uint128_t(0x3fadddd953) << 40) + - __uint128_t(0x464e85c400), - (__uint128_t(0x3fadbbb2a68c9d0b) << 64) + - __uint128_t(0x8800e7969e1c5fc8))); + (UInt128(0x3fadddd953) << 40) + UInt128(0x464e85c400), + (UInt128(0x3fadbbb2a68c9d0b) << 64) + + UInt128(0x8800e7969e1c5fc8))); run_test("2.16656806400000023841857910156251e9", 36, SELECT_CONST(uint64_t(0x41e0246690000001), - (__uint128_t(0x401e812334) << 40) + - __uint128_t(0x8000000400), - (__uint128_t(0x401e024669000000) << 64) + - __uint128_t(0x800000000000018))); + (UInt128(0x401e812334) << 40) + UInt128(0x8000000400), + (UInt128(0x401e024669000000) << 64) + + UInt128(0x800000000000018))); run_test("27949676547093071875", 20, SELECT_CONST(uint64_t(0x43f83e132bc608c9), - (__uint128_t(0x403fc1f099) << 40) + - __uint128_t(0x5e30464402), - (__uint128_t(0x403f83e132bc608c) << 64) + - __uint128_t(0x8803000000000000))); + (UInt128(0x403fc1f099) << 40) + UInt128(0x5e30464402), + (UInt128(0x403f83e132bc608c) << 64) + + UInt128(0x8803000000000000))); } TEST_F(LlvmLibcStrToLDTest, MaxSizeNumbers) { run_test("1.1897314953572317650e4932", 26, SELECT_CONST(uint64_t(0x7FF0000000000000), - (__uint128_t(0x7ffeffffff) << 40) + - __uint128_t(0xffffffffff), - (__uint128_t(0x7ffeffffffffffff) << 64) + - __uint128_t(0xfffd57322e3f8675)), + (UInt128(0x7ffeffffff) << 40) + UInt128(0xffffffffff), + (UInt128(0x7ffeffffffffffff) << 64) + + UInt128(0xfffd57322e3f8675)), SELECT_CONST(ERANGE, 0, 0)); run_test("1.18973149535723176508e4932", 27, SELECT_CONST(uint64_t(0x7FF0000000000000), - (__uint128_t(0x7fff800000) << 40), - (__uint128_t(0x7ffeffffffffffff) << 64) + - __uint128_t(0xffffd2478338036c)), + (UInt128(0x7fff800000) << 40), + (UInt128(0x7ffeffffffffffff) << 64) + + UInt128(0xffffd2478338036c)), SELECT_CONST(ERANGE, ERANGE, 0)); } @@ -171,94 +166,86 @@ TEST_F(LlvmLibcStrToLDTest, MaxSizeNumbers) { // be too small for 64 bit floats. 
TEST_F(LlvmLibcStrToLDTest, SubnormalTests) { run_test("1e-4950", 7, - SELECT_CONST(uint64_t(0), (__uint128_t(0x00000000000000000003)), - (__uint128_t(0x000000000000000000057c9647e1a018))), + SELECT_CONST(uint64_t(0), (UInt128(0x00000000000000000003)), + (UInt128(0x000000000000000000057c9647e1a018))), ERANGE); run_test("1.89e-4951", 10, - SELECT_CONST(uint64_t(0), (__uint128_t(0x00000000000000000001)), - (__uint128_t(0x0000000000000000000109778a006738))), + SELECT_CONST(uint64_t(0), (UInt128(0x00000000000000000001)), + (UInt128(0x0000000000000000000109778a006738))), ERANGE); run_test("4e-4966", 7, - SELECT_CONST(uint64_t(0), (__uint128_t(0)), - (__uint128_t(0x00000000000000000000000000000001))), + SELECT_CONST(uint64_t(0), (UInt128(0)), + (UInt128(0x00000000000000000000000000000001))), ERANGE); } TEST_F(LlvmLibcStrToLDTest, SmallNormalTests) { - run_test( - "3.37e-4932", 10, - SELECT_CONST(uint64_t(0), - (__uint128_t(0x1804cf7) << 40) + __uint128_t(0x908850712), - (__uint128_t(0x10099ee12110a) << 64) + - __uint128_t(0xe24b75c0f50dc0c)), - SELECT_CONST(ERANGE, 0, 0)); + run_test("3.37e-4932", 10, + SELECT_CONST( + uint64_t(0), (UInt128(0x1804cf7) << 40) + UInt128(0x908850712), + (UInt128(0x10099ee12110a) << 64) + UInt128(0xe24b75c0f50dc0c)), + SELECT_CONST(ERANGE, 0, 0)); } TEST_F(LlvmLibcStrToLDTest, ComplexHexadecimalTests) { run_test("0x1p16383", 9, - SELECT_CONST(0x7ff0000000000000, (__uint128_t(0x7ffe800000) << 40), - (__uint128_t(0x7ffe000000000000) << 64)), + SELECT_CONST(0x7ff0000000000000, (UInt128(0x7ffe800000) << 40), + (UInt128(0x7ffe000000000000) << 64)), SELECT_CONST(ERANGE, 0, 0)); run_test("0x123456789abcdef", 17, SELECT_CONST(0x43723456789abcdf, - (__uint128_t(0x403791a2b3) << 40) + - __uint128_t(0xc4d5e6f780), - (__uint128_t(0x403723456789abcd) << 64) + - __uint128_t(0xef00000000000000))); + (UInt128(0x403791a2b3) << 40) + UInt128(0xc4d5e6f780), + (UInt128(0x403723456789abcd) << 64) + + UInt128(0xef00000000000000))); run_test("0x123456789abcdef0123456789ABCDEF", 33, SELECT_CONST(0x47723456789abcdf, - (__uint128_t(0x407791a2b3) << 40) + - __uint128_t(0xc4d5e6f781), - (__uint128_t(0x407723456789abcd) << 64) + - __uint128_t(0xef0123456789abce))); + (UInt128(0x407791a2b3) << 40) + UInt128(0xc4d5e6f781), + (UInt128(0x407723456789abcd) << 64) + + UInt128(0xef0123456789abce))); } TEST_F(LlvmLibcStrToLDTest, InfTests) { run_test("INF", 3, - SELECT_CONST(0x7ff0000000000000, (__uint128_t(0x7fff800000) << 40), - (__uint128_t(0x7fff000000000000) << 64))); + SELECT_CONST(0x7ff0000000000000, (UInt128(0x7fff800000) << 40), + (UInt128(0x7fff000000000000) << 64))); run_test("INFinity", 8, - SELECT_CONST(0x7ff0000000000000, (__uint128_t(0x7fff800000) << 40), - (__uint128_t(0x7fff000000000000) << 64))); + SELECT_CONST(0x7ff0000000000000, (UInt128(0x7fff800000) << 40), + (UInt128(0x7fff000000000000) << 64))); run_test("-inf", 4, - SELECT_CONST(0xfff0000000000000, (__uint128_t(0xffff800000) << 40), - (__uint128_t(0xffff000000000000) << 64))); + SELECT_CONST(0xfff0000000000000, (UInt128(0xffff800000) << 40), + (UInt128(0xffff000000000000) << 64))); } TEST_F(LlvmLibcStrToLDTest, NaNTests) { run_test("NaN", 3, - SELECT_CONST(0x7ff8000000000000, (__uint128_t(0x7fffc00000) << 40), - (__uint128_t(0x7fff800000000000) << 64))); + SELECT_CONST(0x7ff8000000000000, (UInt128(0x7fffc00000) << 40), + (UInt128(0x7fff800000000000) << 64))); run_test("-nAn", 4, - SELECT_CONST(0xfff8000000000000, (__uint128_t(0xffffc00000) << 40), - (__uint128_t(0xffff800000000000) << 64))); + SELECT_CONST(0xfff8000000000000, 
(UInt128(0xffffc00000) << 40), + (UInt128(0xffff800000000000) << 64))); run_test("NaN()", 5, - SELECT_CONST(0x7ff8000000000000, (__uint128_t(0x7fffc00000) << 40), - (__uint128_t(0x7fff800000000000) << 64))); + SELECT_CONST(0x7ff8000000000000, (UInt128(0x7fffc00000) << 40), + (UInt128(0x7fff800000000000) << 64))); run_test("NaN(1234)", 9, SELECT_CONST(0x7ff80000000004d2, - (__uint128_t(0x7fffc00000) << 40) + __uint128_t(0x4d2), - (__uint128_t(0x7fff800000000000) << 64) + - __uint128_t(0x4d2))); + (UInt128(0x7fffc00000) << 40) + UInt128(0x4d2), + (UInt128(0x7fff800000000000) << 64) + UInt128(0x4d2))); run_test("NaN(0xffffffffffff)", 19, SELECT_CONST(0x7ff8ffffffffffff, - (__uint128_t(0x7fffc000ff) << 40) + - __uint128_t(0xffffffffff), - (__uint128_t(0x7fff800000000000) << 64) + - __uint128_t(0xffffffffffff))); + (UInt128(0x7fffc000ff) << 40) + UInt128(0xffffffffff), + (UInt128(0x7fff800000000000) << 64) + + UInt128(0xffffffffffff))); run_test("NaN(0xfffffffffffff)", 20, SELECT_CONST(0x7fffffffffffffff, - (__uint128_t(0x7fffc00fff) << 40) + - __uint128_t(0xffffffffff), - (__uint128_t(0x7fff800000000000) << 64) + - __uint128_t(0xfffffffffffff))); + (UInt128(0x7fffc00fff) << 40) + UInt128(0xffffffffff), + (UInt128(0x7fff800000000000) << 64) + + UInt128(0xfffffffffffff))); run_test("NaN(0xffffffffffffffff)", 23, SELECT_CONST(0x7fffffffffffffff, - (__uint128_t(0x7fffffffff) << 40) + - __uint128_t(0xffffffffff), - (__uint128_t(0x7fff800000000000) << 64) + - __uint128_t(0xffffffffffffffff))); + (UInt128(0x7fffffffff) << 40) + UInt128(0xffffffffff), + (UInt128(0x7fff800000000000) << 64) + + UInt128(0xffffffffffffffff))); run_test("NaN( 1234)", 3, - SELECT_CONST(0x7ff8000000000000, (__uint128_t(0x7fffc00000) << 40), - (__uint128_t(0x7fff800000000000) << 64))); + SELECT_CONST(0x7ff8000000000000, (UInt128(0x7fffc00000) << 40), + (UInt128(0x7fff800000000000) << 64))); } diff --git a/libc/test/src/string/memory_utils/backend_test.cpp b/libc/test/src/string/memory_utils/backend_test.cpp index 27418b7c9933e945fa66b6b6de45ec49f03b30e7..f4ffe9c691e36ee03753df72b1d9b61c20e280d9 100644 --- a/libc/test/src/string/memory_utils/backend_test.cpp +++ b/libc/test/src/string/memory_utils/backend_test.cpp @@ -93,12 +93,12 @@ TYPED_TEST(LlvmLibcMemoryBackend, splat, FunctionTypes) { TYPED_TEST(LlvmLibcMemoryBackend, notEquals, FunctionTypes) { alignas(64) const auto a = GetRandomBuffer(); - EXPECT_EQ(ParamType::notEquals(a, a), 0UL); + EXPECT_EQ(ParamType::notEquals(a, a), uint64_t(0)); for (size_t i = 0; i < a.size(); ++i) { alignas(64) auto b = a; ++b[i]; - EXPECT_NE(ParamType::notEquals(a, b), 0UL); - EXPECT_NE(ParamType::notEquals(b, a), 0UL); + EXPECT_NE(ParamType::notEquals(a, b), uint64_t(0)); + EXPECT_NE(ParamType::notEquals(b, a), uint64_t(0)); } } diff --git a/libc/utils/IntegrationTest/test.h b/libc/utils/IntegrationTest/test.h index 97fa10e0524b9f4814b6e7b2d50eeab7e0c66ccf..556265052dfad5778d57ba4ee48c269a738d23c2 100644 --- a/libc/utils/IntegrationTest/test.h +++ b/libc/utils/IntegrationTest/test.h @@ -13,7 +13,7 @@ #include "src/__support/OSUtil/quick_exit.h" #define __AS_STRING(val) #val -#define __CHECK(file, line, val, should_exit) \ +#define __CHECK_TRUE(file, line, val, should_exit) \ if (!(val)) { \ __llvm_libc::write_to_stderr(file ":" __AS_STRING( \ line) ": Expected '" #val "' to be true, but is false\n"); \ @@ -21,17 +21,41 @@ __llvm_libc::quick_exit(127); \ } -#define __CHECK_NE(file, line, val, should_exit) \ - if ((val)) { \ +#define __CHECK_FALSE(file, line, val, should_exit) \ + if (val) { 
\ __llvm_libc::write_to_stderr(file ":" __AS_STRING( \ line) ": Expected '" #val "' to be false, but is true\n"); \ if (should_exit) \ __llvm_libc::quick_exit(127); \ } -#define EXPECT_TRUE(val) __CHECK(__FILE__, __LINE__, val, false) -#define ASSERT_TRUE(val) __CHECK(__FILE__, __LINE__, val, true) -#define EXPECT_FALSE(val) __CHECK_NE(__FILE__, __LINE__, val, false) -#define ASSERT_FALSE(val) __CHECK_NE(__FILE__, __LINE__, val, true) +#define __CHECK_EQ(file, line, val1, val2, should_exit) \ + if ((val1) != (val2)) { \ + __llvm_libc::write_to_stderr(file ":" __AS_STRING( \ + line) ": Expected '" #val1 "' to be equal to '" #val2 "'\n"); \ + if (should_exit) \ + __llvm_libc::quick_exit(127); \ + } + +#define __CHECK_NE(file, line, val1, val2, should_exit) \ + if ((val1) == (val2)) { \ + __llvm_libc::write_to_stderr(file ":" __AS_STRING( \ + line) ": Expected '" #val1 "' to not be equal to '" #val2 "'\n"); \ + if (should_exit) \ + __llvm_libc::quick_exit(127); \ + } + +#define EXPECT_TRUE(val) __CHECK_TRUE(__FILE__, __LINE__, val, false) +#define ASSERT_TRUE(val) __CHECK_TRUE(__FILE__, __LINE__, val, true) +#define EXPECT_FALSE(val) __CHECK_FALSE(__FILE__, __LINE__, val, false) +#define ASSERT_FALSE(val) __CHECK_FALSE(__FILE__, __LINE__, val, true) +#define EXPECT_EQ(val1, val2) \ + __CHECK_EQ(__FILE__, __LINE__, (val1), (val2), false) +#define ASSERT_EQ(val1, val2) \ + __CHECK_EQ(__FILE__, __LINE__, (val1), (val2), true) +#define EXPECT_NE(val1, val2) \ + __CHECK_NE(__FILE__, __LINE__, (val1), (val2), false) +#define ASSERT_NE(val1, val2) \ + __CHECK_NE(__FILE__, __LINE__, (val1), (val2), true) #endif // LLVM_LIBC_UTILS_INTEGRATION_TEST_TEST_H diff --git a/libc/utils/MPFRWrapper/MPFRUtils.cpp b/libc/utils/MPFRWrapper/MPFRUtils.cpp index 502a15f6c090b37c30a766093f9781829f578e30..61052b99dfadc6073b2b786998d936019f07393d 100644 --- a/libc/utils/MPFRWrapper/MPFRUtils.cpp +++ b/libc/utils/MPFRWrapper/MPFRUtils.cpp @@ -247,6 +247,12 @@ public: return result; } + MPFRNumber fmod(const MPFRNumber &b) { + MPFRNumber result(*this); + mpfr_fmod(result.value, value, b.value, mpfr_rounding); + return result; + } + MPFRNumber frexp(int &exp) { MPFRNumber result(*this); mpfr_exp_t resultExp; @@ -561,6 +567,8 @@ binary_operation_one_output(Operation op, InputType x, InputType y, MPFRNumber inputX(x, precision, rounding); MPFRNumber inputY(y, precision, rounding); switch (op) { + case Operation::Fmod: + return inputX.fmod(inputY); case Operation::Hypot: return inputX.hypot(inputY); default: diff --git a/libc/utils/MPFRWrapper/MPFRUtils.h b/libc/utils/MPFRWrapper/MPFRUtils.h index 2896973a33c2fb7076e3eda9b470f521b6379c15..f1caa41ca1d0128962b102929d42c8ce7551dd8c 100644 --- a/libc/utils/MPFRWrapper/MPFRUtils.h +++ b/libc/utils/MPFRWrapper/MPFRUtils.h @@ -56,6 +56,7 @@ enum class Operation : int { // input and produce a single floating point number of the same type as // output. 
   BeginBinaryOperationsSingleOutput,
+  Fmod,
   Hypot,
   EndBinaryOperationsSingleOutput,
 
diff --git a/libc/utils/UnitTest/CMakeLists.txt b/libc/utils/UnitTest/CMakeLists.txt
index 622dbb426c5caaa120d4559971b414039981caf9..bc9e7b3e607da03bca7fbc07f0683af8fcee8f13 100644
--- a/libc/utils/UnitTest/CMakeLists.txt
+++ b/libc/utils/UnitTest/CMakeLists.txt
@@ -5,7 +5,7 @@ add_library(
   LibcTest.h
 )
 target_include_directories(LibcUnitTest PUBLIC ${LIBC_SOURCE_DIR})
-add_dependencies(LibcUnitTest libc.src.__support.CPP.type_traits)
+add_dependencies(LibcUnitTest libc.src.__support.CPP.type_traits libc.src.__support.CPP.uint128)
 target_link_libraries(LibcUnitTest PUBLIC libc_test_utils)
 
 add_library(
@@ -71,6 +71,7 @@ target_link_libraries(LibcPrintfHelpers LibcUnitTest)
 add_dependencies(
   LibcPrintfHelpers
   LibcUnitTest
-  libc.utils.UnitTest.string_utils
+  libc.src.__support.CPP.uint128
   libc.src.stdio.printf_core.core_structs
+  libc.utils.UnitTest.string_utils
 )
diff --git a/libc/utils/UnitTest/LibcTest.cpp b/libc/utils/UnitTest/LibcTest.cpp
index f37440dc4bde4f10afbaf4dd33f58a8331036b34..42b99a2b951c07377377c0c0784674d10757fc79 100644
--- a/libc/utils/UnitTest/LibcTest.cpp
+++ b/libc/utils/UnitTest/LibcTest.cpp
@@ -8,7 +8,7 @@
 
 #include "LibcTest.h"
 
-#include "src/__support/CPP/UInt.h"
+#include "src/__support/CPP/UInt128.h"
 #include "utils/testutils/ExecuteFunction.h"
 #include
 #include
@@ -42,11 +42,15 @@ describeValue(ValType Value) {
 }
 std::string describeValue(std::string Value) { return std::string(Value); }
 
-#ifdef __SIZEOF_INT128__
-// When the value is __uint128_t, also show its hexadecimal digits.
-// Using template to force exact match, prevent ambiguous promotion.
-std::string describeValue128(__uint128_t Value) {
-  std::string S(sizeof(__uint128_t) * 2, '0');
+
+// When the value is UInt128 or __uint128_t, show its hexadecimal digits.
+// We cannot just use a UInt128 specialization as that resolves to only
+// one type, UInt<128> or __uint128_t. We want both overloads as we want to
+// be able to unittest UInt<128> on platforms where UInt128 resolves to
+// __uint128_t.
+template <typename UInt128Type>
+std::string describeValue128(UInt128Type Value) {
+  std::string S(sizeof(UInt128) * 2, '0');
 
   for (auto I = S.rbegin(), End = S.rend(); I != End; ++I, Value >>= 4) {
     unsigned char Mod = static_cast<unsigned char>(Value) & 15;
@@ -56,26 +60,16 @@ std::string describeValue128(__uint128_t Value) {
   return "0x" + S;
 }
 
-template <> std::string describeValue<__int128_t>(__int128_t Value) {
-  return describeValue128(Value);
-}
+#ifdef __SIZEOF_INT128__
 template <> std::string describeValue<__uint128_t>(__uint128_t Value) {
   return describeValue128(Value);
 }
 #endif
 
-// When the value is UInt<128>, also show its hexadecimal digits.
 template <>
 std::string
 describeValue<__llvm_libc::cpp::UInt<128>>(__llvm_libc::cpp::UInt<128> Value) {
-  std::string S(sizeof(__llvm_libc::cpp::UInt<128>) * 2, '0');
-
-  for (auto I = S.rbegin(), End = S.rend(); I != End; ++I, Value = Value >> 4) {
-    unsigned char Mod = static_cast<unsigned char>(Value) & 15;
-    *I = Mod < 10 ? '0' + Mod : 'a' + Mod - 10;
-  }
-
-  return "0x" + S;
+  return describeValue128(Value);
 }
 
 template <typename ValType>
@@ -234,17 +228,6 @@ template bool test(RunContext *Ctx, TestCondition Cond,
                    const char *RHSStr, const char *File, unsigned long Line);
 
-#ifdef __SIZEOF_INT128__
-template bool test<__int128_t>(RunContext *Ctx, TestCondition Cond,
-                               __int128_t LHS, __int128_t RHS,
-                               const char *LHSStr, const char *RHSStr,
-                               const char *File, unsigned long Line);
-#endif
-template bool test<__llvm_libc::cpp::UInt<128>>(
-    RunContext *Ctx, TestCondition Cond, __llvm_libc::cpp::UInt<128> LHS,
-    __llvm_libc::cpp::UInt<128> RHS, const char *LHSStr, const char *RHSStr,
-    const char *File, unsigned long Line);
-
 template bool test<unsigned char>(RunContext *Ctx, TestCondition Cond,
                                   unsigned char LHS, unsigned char RHS,
                                   const char *LHSStr, const char *RHSStr,
                                   const char *File, unsigned long Line);
@@ -275,13 +258,24 @@ template bool test(RunContext *Ctx, TestCondition Cond,
                    const char *LHSStr, const char *RHSStr, const char *File,
                    unsigned long Line);
 
+// We cannot just use a single UInt128 specialization as that resolves to only
+// one type, UInt<128> or __uint128_t. We want both overloads as we want to
+// be able to unittest UInt<128> on platforms where UInt128 resolves to
+// __uint128_t.
 #ifdef __SIZEOF_INT128__
+// When builtin __uint128_t type is available, include its specialization
+// also.
 template bool test<__uint128_t>(RunContext *Ctx, TestCondition Cond,
                                 __uint128_t LHS, __uint128_t RHS,
                                 const char *LHSStr, const char *RHSStr,
                                 const char *File, unsigned long Line);
 #endif
 
+template bool test<__llvm_libc::cpp::UInt<128>>(
+    RunContext *Ctx, TestCondition Cond, __llvm_libc::cpp::UInt<128> LHS,
+    __llvm_libc::cpp::UInt<128> RHS, const char *LHSStr, const char *RHSStr,
+    const char *File, unsigned long Line);
+
 } // namespace internal
 
 bool Test::testStrEq(const char *LHS, const char *RHS, const char *LHSStr,
diff --git a/libc/utils/UnitTest/PrintfMatcher.cpp b/libc/utils/UnitTest/PrintfMatcher.cpp
index f30b1ce31b0d015cc792a86ecb5407fc680cdf7b..cd17060faa8b1c37aa00f5634fef3b117bc91224 100644
--- a/libc/utils/UnitTest/PrintfMatcher.cpp
+++ b/libc/utils/UnitTest/PrintfMatcher.cpp
@@ -7,6 +7,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "PrintfMatcher.h"
+
+#include "src/__support/CPP/UInt128.h"
 #include "src/stdio/printf_core/core_structs.h"
 #include "utils/UnitTest/StringUtils.h"
 
@@ -70,8 +72,7 @@ void display(testutils::StreamWrapper &stream, FormatSection form) {
                    reinterpret_cast(form.conv_val_ptr))
            << "\n";
   else if (form.conv_name != '%')
-    stream << "\tvalue: " << int_to_hex<__uint128_t>(form.conv_val_raw)
-           << "\n";
+    stream << "\tvalue: " << int_to_hex(form.conv_val_raw) << "\n";
 }
 }
 } // anonymous namespace
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 1f0d4354cdd0629b032351dab9b1f2c66bf63dca..0c0ede4ce01feea66efdd7003772d67f181f8418 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -99,6 +99,8 @@ set(files
   __algorithm/ranges_minmax.h
   __algorithm/ranges_minmax_element.h
   __algorithm/ranges_mismatch.h
+  __algorithm/ranges_move.h
+  __algorithm/ranges_move_backward.h
   __algorithm/ranges_none_of.h
   __algorithm/ranges_replace.h
   __algorithm/ranges_replace_if.h
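An editorial aside on the LibcTest.cpp change above: the shared `describeValue128` template renders a value one hex nibble at a time, which is why it works for both `UInt<128>` and `__uint128_t`. A minimal standalone sketch of that nibble walk, written against plain `uint64_t` so it compiles anywhere (names here are illustrative, not from the patch):

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Same nibble-at-a-time scheme as describeValue128: start from the least
// significant digit, fill the string right to left, shift by 4 each step.
template <typename UIntType>
std::string to_hex(UIntType value) {
  std::string s(sizeof(UIntType) * 2, '0');
  for (auto it = s.rbegin(), end = s.rend(); it != end; ++it, value >>= 4) {
    unsigned char mod = static_cast<unsigned char>(value) & 15;
    *it = mod < 10 ? '0' + mod : 'a' + mod - 10;
  }
  return "0x" + s;
}

int main() {
  std::cout << to_hex<std::uint64_t>(0x7ff8000000000000u) << '\n';
  // prints: 0x7ff8000000000000
}
```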
diff --git a/libcxx/include/__algorithm/move.h b/libcxx/include/__algorithm/move.h
index 72bf3d76ea5be47c3f241148d97a683910efa2a2..0b08d31c176efb492646b06a95735bc205369eee 100644
--- a/libcxx/include/__algorithm/move.h
+++ b/libcxx/include/__algorithm/move.h
@@ -11,7 +11,10 @@
 
 #include <__algorithm/unwrap_iter.h>
 #include <__config>
+#include <__iterator/iterator_traits.h>
+#include <__iterator/reverse_iterator.h>
 #include <__utility/move.h>
+#include <__utility/pair.h>
 #include <cstring>
 #include <type_traits>
 
@@ -23,53 +26,88 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 // move
 
-template <class _InputIterator, class _OutputIterator>
+template <class _InIter, class _Sent, class _OutIter>
 inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-_OutputIterator
-__move_constexpr(_InputIterator __first, _InputIterator __last, _OutputIterator __result)
-{
-    for (; __first != __last; ++__first, (void) ++__result)
-        *__result = _VSTD::move(*__first);
-    return __result;
+pair<_InIter, _OutIter> __move_impl(_InIter __first, _Sent __last, _OutIter __result) {
+  while (__first != __last) {
+    *__result = std::move(*__first);
+    ++__first;
+    ++__result;
+  }
+  return std::make_pair(std::move(__first), std::move(__result));
 }
 
-template <class _InputIterator, class _OutputIterator>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-_OutputIterator
-__move(_InputIterator __first, _InputIterator __last, _OutputIterator __result)
-{
-    return _VSTD::__move_constexpr(__first, __last, __result);
+template <class _InType, class _OutType,
+          class = __enable_if_t<is_same<typename remove_const<_InType>::type, _OutType>::value
+                                && is_trivially_move_assignable<_OutType>::value> >
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
+pair<_InType*, _OutType*> __move_impl(_InType* __first, _InType* __last, _OutType* __result) {
+  if (__libcpp_is_constant_evaluated()
+// TODO: Remove this once GCC supports __builtin_memmove during constant evaluation
+#ifndef _LIBCPP_COMPILER_GCC
+      && !is_trivially_copyable<_InType>::value
+#endif
+     )
+    return std::__move_impl<_InType*, _InType*, _OutType*>(__first, __last, __result);
+  const size_t __n = static_cast<size_t>(__last - __first);
+  ::__builtin_memmove(__result, __first, __n * sizeof(_OutType));
+  return std::make_pair(__first + __n, __result + __n);
 }
 
-template <class _Tp, class _Up>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-typename enable_if
-<
-    is_same<typename remove_const<_Tp>::type, _Up>::value &&
-    is_trivially_move_assignable<_Up>::value,
-    _Up*
->::type
-__move(_Tp* __first, _Tp* __last, _Up* __result)
-{
-    const size_t __n = static_cast<size_t>(__last - __first);
-    if (__n > 0)
-        _VSTD::memmove(__result, __first, __n * sizeof(_Up));
-    return __result + __n;
+template <class _Type>
+struct __is_trivially_move_assignable_unwrapped_impl : false_type {};
+
+template <class _Type>
+struct __is_trivially_move_assignable_unwrapped_impl<_Type*> : is_trivially_move_assignable<_Type> {};
+
+template <class _Iter>
+struct __is_trivially_move_assignable_unwrapped
+    : __is_trivially_move_assignable_unwrapped_impl<decltype(std::__unwrap_iter(std::declval<_Iter>()))> {};
+
+template <class _InIter, class _OutIter,
+          __enable_if_t<is_same<typename remove_const<typename iterator_traits<_InIter>::value_type>::type,
+                                typename iterator_traits<_OutIter>::value_type>::value
+                            && __is_cpp17_contiguous_iterator<_InIter>::value
+                            && __is_cpp17_contiguous_iterator<_OutIter>::value
+                            && is_trivially_move_assignable<__iter_value_type<_OutIter> >::value, int> = 0>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+pair<reverse_iterator<_InIter>, reverse_iterator<_OutIter> >
+__move_impl(reverse_iterator<_InIter> __first,
+            reverse_iterator<_InIter> __last,
+            reverse_iterator<_OutIter> __result) {
+  auto __first_base = std::__unwrap_iter(__first.base());
+  auto __last_base = std::__unwrap_iter(__last.base());
+  auto __result_base = std::__unwrap_iter(__result.base());
+  auto __result_first = __result_base - (__first_base - __last_base);
+  std::__move_impl(__last_base, __first_base, __result_first);
+  return std::make_pair(__last, reverse_iterator<_OutIter>(std::__rewrap_iter(__result.base(), __result_first)));
+}
+
+template <class _InIter, class _Sent, class _OutIter>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
+__enable_if_t<is_copy_constructible<_InIter>::value
+                  && is_copy_constructible<_Sent>::value
+                  && is_copy_constructible<_OutIter>::value, pair<_InIter, _OutIter> >
+__move(_InIter __first, _Sent __last, _OutIter __result) {
+  auto __ret = std::__move_impl(std::__unwrap_iter(__first), std::__unwrap_iter(__last), std::__unwrap_iter(__result));
+  return std::make_pair(std::__rewrap_iter(__first, __ret.first), std::__rewrap_iter(__result, __ret.second));
+}
+
+template <class _InIter, class _Sent, class _OutIter>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
+__enable_if_t<!is_copy_constructible<_InIter>::value
+                  || !is_copy_constructible<_Sent>::value
+                  || !is_copy_constructible<_OutIter>::value, pair<_InIter, _OutIter> >
+__move(_InIter __first, _Sent __last, _OutIter __result) {
+  return std::__move_impl(std::move(__first), std::move(__last), std::move(__result));
 }
 
 template <class _InputIterator, class _OutputIterator>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
-_OutputIterator
-move(_InputIterator __first, _InputIterator __last, _OutputIterator __result)
-{
-    if (__libcpp_is_constant_evaluated()) {
-        return _VSTD::__move_constexpr(__first, __last, __result);
-    } else {
-        return _VSTD::__rewrap_iter(__result,
-            _VSTD::__move(_VSTD::__unwrap_iter(__first),
-                          _VSTD::__unwrap_iter(__last),
-                          _VSTD::__unwrap_iter(__result)));
-    }
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
+_OutputIterator move(_InputIterator __first, _InputIterator __last, _OutputIterator __result) {
+  return std::__move(__first, __last, __result).second;
 }
 
 _LIBCPP_END_NAMESPACE_STD
diff --git a/libcxx/include/__algorithm/ranges_move.h b/libcxx/include/__algorithm/ranges_move.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad4342d7c989a710947505caf5f95da78a0947ce
--- /dev/null
+++ b/libcxx/include/__algorithm/ranges_move.h
@@ -0,0 +1,83 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_RANGES_MOVE_H
+#define _LIBCPP___ALGORITHM_RANGES_MOVE_H
+
+#include <__algorithm/in_out_result.h>
+#include <__algorithm/move.h>
+#include <__config>
+#include <__iterator/concepts.h>
+#include <__iterator/iter_move.h>
+#include <__ranges/access.h>
+#include <__ranges/concepts.h>
+#include <__ranges/dangling.h>
+#include <__utility/move.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+#if _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace ranges {
+
+template <class _InIter, class _OutIter>
+using move_result = in_out_result<_InIter, _OutIter>;
+
+namespace __move {
+struct __fn {
+
+  template <class _InIter, class _Sent, class _OutIter>
+    requires __iter_move::__move_deref<_InIter> // check that we are allowed to std::move() the value
+  _LIBCPP_HIDE_FROM_ABI constexpr static
+  move_result<_InIter, _OutIter> __move_impl(_InIter __first, _Sent __last, _OutIter __result) {
+    auto __ret = std::__move(std::move(__first), std::move(__last), std::move(__result));
+    return {std::move(__ret.first), std::move(__ret.second)};
+  }
+
+  template <class _InIter, class _Sent, class _OutIter>
+  _LIBCPP_HIDE_FROM_ABI constexpr static
+  move_result<_InIter, _OutIter> __move_impl(_InIter __first, _Sent __last, _OutIter __result) {
+    while (__first != __last) {
+      *__result = ranges::iter_move(__first);
+      ++__first;
+      ++__result;
+    }
+    return {std::move(__first), std::move(__result)};
+  }
+
+  template <input_iterator _InIter, sentinel_for<_InIter> _Sent, weakly_incrementable _OutIter>
+    requires indirectly_movable<_InIter, _OutIter>
+  _LIBCPP_HIDE_FROM_ABI constexpr
+  move_result<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const {
+    return __move_impl(std::move(__first), std::move(__last), std::move(__result));
+  }
+
+  template <input_range _Range, weakly_incrementable _OutIter>
+    requires indirectly_movable<iterator_t<_Range>, _OutIter>
+  _LIBCPP_HIDE_FROM_ABI constexpr
+  move_result<borrowed_iterator_t<_Range>, _OutIter> operator()(_Range&& __range, _OutIter __result) const {
+    return __move_impl(ranges::begin(__range), ranges::end(__range), std::move(__result));
+  }
+
+};
+} // namespace __move
+
+inline namespace __cpo {
+  inline constexpr auto move = __move::__fn{};
+} // namespace __cpo
+} // namespace ranges
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
+
+#endif // _LIBCPP___ALGORITHM_RANGES_MOVE_H
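A quick usage illustration of the new CPO (an editorial sketch, not part of the patch): unlike `std::move`, which returns only the output iterator, `ranges::move` returns an `in_out_result` carrying both the exhausted input iterator and the output iterator.

```cpp
#include <algorithm>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> src = {"a", "b", "c"};
  std::vector<std::string> dst(src.size());

  // ret.in == src.end(), ret.out == dst.end(); the strings in src are left
  // in a valid but unspecified (moved-from) state.
  auto ret = std::ranges::move(src, dst.begin());
  (void)ret;
}
```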
diff --git a/libcxx/include/__algorithm/ranges_move_backward.h b/libcxx/include/__algorithm/ranges_move_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3dfa7139603c2767f71f293b2ab9e77fef50fe1
--- /dev/null
+++ b/libcxx/include/__algorithm/ranges_move_backward.h
@@ -0,0 +1,75 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_RANGES_MOVE_BACKWARD_H
+#define _LIBCPP___ALGORITHM_RANGES_MOVE_BACKWARD_H
+
+#include <__algorithm/in_out_result.h>
+#include <__algorithm/ranges_move.h>
+#include <__config>
+#include <__iterator/concepts.h>
+#include <__iterator/iter_move.h>
+#include <__iterator/next.h>
+#include <__iterator/reverse_iterator.h>
+#include <__ranges/access.h>
+#include <__ranges/concepts.h>
+#include <__ranges/dangling.h>
+#include <__utility/move.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+#if _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace ranges {
+
+template <class _InIter, class _OutIter>
+using move_backward_result = in_out_result<_InIter, _OutIter>;
+
+namespace __move_backward {
+struct __fn {
+
+  template <class _InIter, class _Sent, class _OutIter>
+  _LIBCPP_HIDE_FROM_ABI constexpr static
+  move_backward_result<_InIter, _OutIter> __move_backward_impl(_InIter __first, _Sent __last, _OutIter __result) {
+    auto __ret = ranges::move(std::make_reverse_iterator(ranges::next(__first, __last)),
+                              std::make_reverse_iterator(__first),
+                              std::make_reverse_iterator(__result));
+    return {std::move(__ret.in.base()), std::move(__ret.out.base())};
+  }
+
+  template <bidirectional_iterator _InIter, sentinel_for<_InIter> _Sent, bidirectional_iterator _OutIter>
+    requires indirectly_movable<_InIter, _OutIter>
+  _LIBCPP_HIDE_FROM_ABI constexpr
+  move_backward_result<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const {
+    return __move_backward_impl(std::move(__first), std::move(__last), std::move(__result));
+  }
+
+  template <bidirectional_range _Range, bidirectional_iterator _Iter>
+    requires indirectly_movable<iterator_t<_Range>, _Iter>
+  _LIBCPP_HIDE_FROM_ABI constexpr
+  move_backward_result<borrowed_iterator_t<_Range>, _Iter> operator()(_Range&& __range, _Iter __result) const {
+    return __move_backward_impl(ranges::begin(__range), ranges::end(__range), std::move(__result));
+  }
+
+};
+} // namespace __move_backward
+
+inline namespace __cpo {
+  inline constexpr auto move_backward = __move_backward::__fn{};
+} // namespace __cpo
+} // namespace ranges
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
+
+#endif // _LIBCPP___ALGORITHM_RANGES_MOVE_BACKWARD_H
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 11f934c0edf3357e0fd2b629c7fd6cc7a9b8b402..90ae511b4269b1b8c97f7c64e2262eb898845092 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1176,6 +1176,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
 #    define _LIBCPP_PACKED_BYTE_FOR_AIX_END /* empty */
 #  endif
 
+#  if __has_attribute(__packed__)
+#    define _LIBCPP_PACKED __attribute__((__packed__))
+#  else
+#    define _LIBCPP_PACKED
+#  endif
+
 #endif // __cplusplus
 
 #endif // _LIBCPP___CONFIG
diff --git a/libcxx/include/__debug b/libcxx/include/__debug
index 403710600b0d68ade6c0e8438a61f1b79f753988..d3dd202b54ab2cd08af02794d16174b3206fd841 100644
--- a/libcxx/include/__debug
+++ b/libcxx/include/__debug
@@ -12,6 +12,7 @@
 
 #include <__assert>
 #include <__config>
+#include
 #include
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -51,10 +52,6 @@
 
 #if defined(_LIBCPP_ENABLE_DEBUG_MODE) || defined(_LIBCPP_BUILDING_LIBRARY)
 
-#include
-#include
-#include
-
 _LIBCPP_BEGIN_NAMESPACE_STD
 
 struct _LIBCPP_TYPE_VIS __c_node;
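A side note on the `_LIBCPP_PACKED` macro added to `__config` above: it exists so that bit-field containers keep their intended size on ABIs such as AIX, where bit-fields otherwise widen their struct; the `<string>` hunk later in this patch is its consumer. A minimal sketch of the effect, using the GCC/Clang attribute directly (illustrative only, outside the patch):

```cpp
#include <cstdio>

// Mirrors what _LIBCPP_PACKED expands to when __has_attribute(__packed__).
#define PACKED __attribute__((__packed__))

struct PACKED Bits {
  unsigned char is_long : 1;
  unsigned char size : 7;
};

int main() {
  // With the packed attribute this stays 1 byte even on ABIs where
  // bit-field structs are padded to a default width (e.g. 4 bytes on AIX).
  std::printf("%zu\n", sizeof(Bits));
}
```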
diff --git a/libcxx/include/__format/formatter_output.h b/libcxx/include/__format/formatter_output.h
index d92423c506ad5961d630186bf1902d2ab333f4f1..ab016f6f16107b8c6944fd4ef0d9efc4d5ba13bd 100644
--- a/libcxx/include/__format/formatter_output.h
+++ b/libcxx/include/__format/formatter_output.h
@@ -11,12 +11,16 @@
 #define _LIBCPP___FORMAT_FORMATTER_OUTPUT_H
 
 #include <__algorithm/copy.h>
+#include <__algorithm/copy_n.h>
 #include <__algorithm/fill_n.h>
+#include <__algorithm/transform.h>
 #include <__config>
+#include <__format/formatter.h>
 #include <__format/parser_std_format_spec.h>
 #include <__utility/move.h>
 #include <__utility/unreachable.h>
 #include <cstddef>
+#include <string>
 #include <string_view>
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -29,6 +33,24 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 namespace __formatter {
 
+_LIBCPP_HIDE_FROM_ABI constexpr char __hex_to_upper(char c) {
+  switch (c) {
+  case 'a':
+    return 'A';
+  case 'b':
+    return 'B';
+  case 'c':
+    return 'C';
+  case 'd':
+    return 'D';
+  case 'e':
+    return 'E';
+  case 'f':
+    return 'F';
+  }
+  return c;
+}
+
 // TODO FMT remove _v2 suffix.
 struct _LIBCPP_TYPE_VIS __padding_size_result_v2 {
   size_t __before_;
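An editorial aside: `__hex_to_upper` above lets the writer emit uppercase hexadecimal output by transforming digits as they are copied, instead of reformatting the number. Roughly the same effect, sketched with the standard facilities (illustrative only):

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

int main() {
  std::string hex = "7ffc00ff";
  // Apply an uppercasing transform while copying, the way the grouping loop
  // below applies __hex_to_upper via std::transform.
  std::transform(hex.begin(), hex.end(), hex.begin(),
                 [](unsigned char c) { return std::toupper(c); });
  std::cout << hex << '\n'; // prints: 7FFC00FF
}
```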
@@ -67,6 +89,73 @@ _LIBCPP_HIDE_FROM_ABI constexpr __padding_size_result_v2 __padding_size_v2(size_
   __libcpp_unreachable();
 }
 
+template <class _OutIt, class _CharT>
+_LIBCPP_HIDE_FROM_ABI _OutIt __write_using_decimal_separators(_OutIt __out_it, const char* __begin, const char* __first,
+                                                              const char* __last, string&& __grouping, _CharT __sep,
+                                                              __format_spec::__parsed_specifications<_CharT> __specs) {
+  _LIBCPP_ASSERT(__specs.__alignment_ != __format_spec::__alignment::__default,
+                 "the caller should adjust the default to the value required by the type");
+
+  int __size = (__first - __begin) +    // [sign][prefix]
+               (__last - __first) +     // data
+               (__grouping.size() - 1); // number of separator characters
+
+  __padding_size_result_v2 __padding = {0, 0};
+  if (__specs.__alignment_ == __format_spec::__alignment::__zero_padding) {
+    // Write [sign][prefix].
+    __out_it = _VSTD::copy(__begin, __first, _VSTD::move(__out_it));
+
+    if (__specs.__width_ > __size) {
+      // Write zero padding.
+      __padding.__before_ = __specs.__width_ - __size;
+      __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __specs.__width_ - __size, _CharT('0'));
+    }
+  } else {
+    if (__specs.__width_ > __size) {
+      // Determine padding and write padding.
+      __padding = __padding_size_v2(__size, __specs.__width_, __specs.__alignment_);
+
+      __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __padding.__before_, __specs.__fill_);
+    }
+    // Write [sign][prefix].
+    __out_it = _VSTD::copy(__begin, __first, _VSTD::move(__out_it));
+  }
+
+  auto __r = __grouping.rbegin();
+  auto __e = __grouping.rend() - 1;
+  _LIBCPP_ASSERT(__r != __e, "The slow grouping formatting is used while "
+                             "there will be no separators written.");
+  // The output is divided in small groups of numbers to write:
+  // - A group before the first separator.
+  // - A separator and a group, repeated for the number of separators.
+  // - A group after the last separator.
+  // This loop achieves that process by testing the termination condition
+  // midway in the loop.
+  //
+  // TODO FMT This loop evaluates the loop invariant `__parser.__type !=
+  // _Flags::_Type::__hexadecimal_upper_case` for every iteration. (This test
+  // happens in the __write call.) Benchmark whether making two loops and
+  // hoisting the invariant is worth the effort.
+  while (true) {
+    if (__specs.__std_.__type_ == __format_spec::__type::__hexadecimal_upper_case) {
+      __last = __first + *__r;
+      __out_it = _VSTD::transform(__first, __last, _VSTD::move(__out_it), __hex_to_upper);
+      __first = __last;
+    } else {
+      __out_it = _VSTD::copy_n(__first, *__r, _VSTD::move(__out_it));
+      __first += *__r;
+    }
+
+    if (__r == __e)
+      break;
+
+    ++__r;
+    *__out_it++ = __sep;
+  }
+
+  return _VSTD::fill_n(_VSTD::move(__out_it), __padding.__after_, __specs.__fill_);
+}
+
 /// Writes the input to the output with the required padding.
 ///
 /// Since the output column width is specified the function can be used for
@@ -107,12 +196,39 @@ _LIBCPP_HIDE_FROM_ABI auto __write(const _CharT* __first, const _CharT* __last,
   return _VSTD::fill_n(_VSTD::move(__out_it), __padding.__after_, __specs.__fill_);
 }
 
+/// \overload
+/// Calls the function above where \a __size = \a __last - \a __first.
+template <class _CharT, class _ParserCharT>
+_LIBCPP_HIDE_FROM_ABI auto __write(const _CharT* __first, const _CharT* __last,
+                                   output_iterator<const _CharT&> auto __out_it,
+                                   __format_spec::__parsed_specifications<_ParserCharT> __specs) -> decltype(__out_it) {
+  return __write(__first, __last, _VSTD::move(__out_it), __specs, __last - __first);
+}
+
+template <class _CharT, class _ParserCharT, class _UnaryOperation>
+_LIBCPP_HIDE_FROM_ABI auto __write_transformed(const _CharT* __first, const _CharT* __last,
+                                               output_iterator<const _CharT&> auto __out_it,
+                                               __format_spec::__parsed_specifications<_ParserCharT> __specs,
+                                               _UnaryOperation __op) -> decltype(__out_it) {
+  _LIBCPP_ASSERT(__first <= __last, "Not a valid range");
+
+  ptrdiff_t __size = __last - __first;
+  if (__size >= __specs.__width_)
+    return _VSTD::transform(__first, __last, _VSTD::move(__out_it), __op);
+
+  __padding_size_result_v2 __padding = __padding_size_v2(__size, __specs.__width_, __specs.__alignment_);
+  __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __padding.__before_, __specs.__fill_);
+  __out_it = _VSTD::transform(__first, __last, _VSTD::move(__out_it), __op);
+  return _VSTD::fill_n(_VSTD::move(__out_it), __padding.__after_, __specs.__fill_);
+}
+
 #  ifndef _LIBCPP_HAS_NO_UNICODE
 template <class _CharT>
 _LIBCPP_HIDE_FROM_ABI auto __write_unicode_no_precision(basic_string_view<_CharT> __str,
                                                         output_iterator<const _CharT&> auto __out_it,
                                                         __format_spec::__parsed_specifications<_CharT> __specs)
     -> decltype(__out_it) {
+  _LIBCPP_ASSERT(!__specs.__has_precision(), "use __write_unicode");
   // No padding -> copy the string
   if (!__specs.__has_width())
diff --git a/libcxx/include/__type_traits/is_arithmetic.h b/libcxx/include/__type_traits/is_arithmetic.h
index cfe2fff7d4393b33e09610772e15588d965add26..6d631f41c7d4a5f66b844402111b5c4942d75a38 100644
--- a/libcxx/include/__type_traits/is_arithmetic.h
+++ b/libcxx/include/__type_traits/is_arithmetic.h
@@ -20,19 +20,10 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
-#if __has_keyword(__is_arithmetic)
-
-template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS is_arithmetic : integral_constant<bool, __is_arithmetic(_Tp)> {};
-
-#else
-
 template <class _Tp>
 struct _LIBCPP_TEMPLATE_VIS is_arithmetic
     : public integral_constant<bool, is_integral<_Tp>::value || is_floating_point<_Tp>::value> {};
 
-#endif // __has_keyword(__is_arithmetic)
-
 #if _LIBCPP_STD_VER > 14
 template <class _Tp>
 inline constexpr bool is_arithmetic_v = is_arithmetic<_Tp>::value;
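For context on the trait change above: the fallback that the patch keeps composes `is_arithmetic` from the two simpler traits rather than relying on a compiler builtin. A self-contained sketch of that composition (names illustrative, not libc++'s internals):

```cpp
#include <type_traits>

// Equivalent shape to the library fallback retained by the change above.
template <class T>
struct my_is_arithmetic
    : std::integral_constant<bool, std::is_integral<T>::value ||
                                       std::is_floating_point<T>::value> {};

static_assert(my_is_arithmetic<int>::value, "int is arithmetic");
static_assert(my_is_arithmetic<double>::value, "double is arithmetic");
static_assert(!my_is_arithmetic<int*>::value, "pointers are not");

int main() {}
```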
diff --git a/libcxx/include/__type_traits/is_floating_point.h b/libcxx/include/__type_traits/is_floating_point.h
index d6211af743572ce1475844e480bb98d60cdb85e9..d93e5d99d07b8b0d6a03ff33eec4fb4854a87425 100644
--- a/libcxx/include/__type_traits/is_floating_point.h
+++ b/libcxx/include/__type_traits/is_floating_point.h
@@ -19,13 +19,6 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
-#if __has_keyword(__is_floating_point)
-
-template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS is_floating_point : integral_constant<bool, __is_floating_point(_Tp)> {};
-
-#else
-
 template <class _Tp> struct __libcpp_is_floating_point              : public false_type {};
 template <>          struct __libcpp_is_floating_point<float>       : public true_type {};
 template <>          struct __libcpp_is_floating_point<double>      : public true_type {};
@@ -34,8 +27,6 @@ template <>          struct __libcpp_is_floating_point<long double> : public tru
 
 template <class _Tp> struct _LIBCPP_TEMPLATE_VIS is_floating_point
     : public __libcpp_is_floating_point<typename remove_cv<_Tp>::type> {};
 
-#endif // __has_keyword(__is_floating_point)
-
 #if _LIBCPP_STD_VER > 14
 template <class _Tp>
 inline constexpr bool is_floating_point_v = is_floating_point<_Tp>::value;
diff --git a/libcxx/include/algorithm b/libcxx/include/algorithm
index 3db67affcabdd037b09d8f610861787505c2b92e..cb1cb1dc6b43562611ec6dc2955f268d777fc26f 100644
--- a/libcxx/include/algorithm
+++ b/libcxx/include/algorithm
@@ -450,6 +450,27 @@ namespace ranges {
     ranges::lexicographical_compare(R1&& r1, R2&& r2, Comp comp = {},
                                     Proj1 proj1 = {}, Proj2 proj2 = {});       // since C++20
 
+  template<bidirectional_iterator I1, sentinel_for<I1> S1, bidirectional_iterator I2>
+    requires indirectly_movable<I1, I2>
+    constexpr ranges::move_backward_result<I1, I2>
+      ranges::move_backward(I1 first, S1 last, I2 result);                     // since C++20
+
+  template<bidirectional_range R, bidirectional_iterator I>
+    requires indirectly_movable<iterator_t<R>, I>
+    constexpr ranges::move_backward_result<borrowed_iterator_t<R>, I>
+      ranges::move_backward(R&& r, I result);                                  // since C++20
+
+  template<input_iterator I, sentinel_for<I> S, weakly_incrementable O>
+    requires indirectly_movable<I, O>
+    constexpr ranges::move_result<I, O>
+      ranges::move(I first, S last, O result);                                 // since C++20
+
+  template<input_range R, weakly_incrementable O>
+    requires indirectly_movable<iterator_t<R>, O>
+    constexpr ranges::move_result<borrowed_iterator_t<R>, O>
+      ranges::move(R&& r, O result);                                           // since C++20
+
+
 }
 
 constexpr bool     // constexpr in C++20
@@ -1195,6 +1216,8 @@ template
 #include <__algorithm/ranges_minmax.h>
 #include <__algorithm/ranges_minmax_element.h>
 #include <__algorithm/ranges_mismatch.h>
+#include <__algorithm/ranges_move.h>
+#include <__algorithm/ranges_move_backward.h>
 #include <__algorithm/ranges_none_of.h>
 #include <__algorithm/ranges_replace.h>
 #include <__algorithm/ranges_replace_if.h>
diff --git a/libcxx/include/locale b/libcxx/include/locale
index b95a64168777e725961d42c57c023cabe27e439b..8ac2aacb6edfeac8303af0bb12ace333daecbcc8 100644
--- a/libcxx/include/locale
+++ b/libcxx/include/locale
@@ -201,9 +201,7 @@ template <class charT> class messages_byname;
 #include <__iterator/istreambuf_iterator.h>
 #include <__iterator/ostreambuf_iterator.h>
 #include <__locale>
-#ifndef __APPLE__
-#   include <nl_types.h>
-#endif
+#include <nl_types.h> // TODO: Remove this include
 #include
 #include
 #include
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index 7294f3536810b380e73a67564ecc2201b532e07c..cd71a3c55d97513425c2487319f91cee6151feb1 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -338,6 +338,8 @@ module std [system] {
       module ranges_minmax          { private header "__algorithm/ranges_minmax.h" }
      module ranges_minmax_element { private header "__algorithm/ranges_minmax_element.h" }
       module ranges_mismatch        { private header "__algorithm/ranges_mismatch.h" }
+      module ranges_move            { private header "__algorithm/ranges_move.h" }
+      module ranges_move_backward   { private header "__algorithm/ranges_move_backward.h" }
       module ranges_none_of         { private header "__algorithm/ranges_none_of.h" }
       module ranges_replace         { private header "__algorithm/ranges_replace.h" }
       module ranges_replace_if      { private header "__algorithm/ranges_replace_if.h" }
diff --git a/libcxx/include/string b/libcxx/include/string
index
ab8d2ac446d1c591d93da786ac3f9e527124e532..0ce8c4fecebddf2f30f94fa055c362fec9acee3d 100644 --- a/libcxx/include/string +++ b/libcxx/include/string @@ -721,10 +721,16 @@ private: static const size_type __endian_factor = 2; #endif + // Attribute 'packed' is used to keep the layout compatible with the + // previous definition that did not use bit fields. This is because on + // some platforms bit fields have a default size rather than the actual + // size used, e.g., it is 4 bytes on AIX. See D128285 for details. struct __long { - size_type __is_long_ : 1; - size_type __cap_ : sizeof(size_type) * CHAR_BIT - 1; + struct _LIBCPP_PACKED { + size_type __is_long_ : 1; + size_type __cap_ : sizeof(size_type) * CHAR_BIT - 1; + }; size_type __size_; pointer __data_; }; @@ -734,8 +740,10 @@ private: struct __short { - unsigned char __is_long_ : 1; - unsigned char __size_ : 7; + struct _LIBCPP_PACKED { + unsigned char __is_long_ : 1; + unsigned char __size_ : 7; + }; char __padding_[sizeof(value_type) - 1]; value_type __data_[__min_cap]; }; diff --git a/libcxx/include/tuple b/libcxx/include/tuple index 251b685912b61ace571cb8539e69cc770b9cb198..d0c159249ede911c78bada146bb7ae7c05bb2610 100644 --- a/libcxx/include/tuple +++ b/libcxx/include/tuple @@ -25,14 +25,24 @@ public: explicit(see-below) tuple(U&&...); // constexpr in C++14 tuple(const tuple&) = default; tuple(tuple&&) = default; + + template + constexpr explicit(see-below) tuple(tuple&); // C++23 template explicit(see-below) tuple(const tuple&); // constexpr in C++14 template explicit(see-below) tuple(tuple&&); // constexpr in C++14 + template + constexpr explicit(see-below) tuple(const tuple&&); // C++23 + + template + constexpr explicit(see-below) tuple(pair&); // iff sizeof...(Types) == 2 // C++23 template explicit(see-below) tuple(const pair&); // iff sizeof...(T) == 2 // constexpr in C++14 template explicit(see-below) tuple(pair&&); // iff sizeof...(T) == 2 // constexpr in C++14 + template + constexpr explicit(see-below) tuple(const pair&&); // iff sizeof...(Types) == 2 // C++23 // allocator-extended constructors template @@ -45,25 +55,47 @@ public: tuple(allocator_arg_t, const Alloc& a, const tuple&); // constexpr in C++20 template tuple(allocator_arg_t, const Alloc& a, tuple&&); // constexpr in C++20 + template + constexpr explicit(see-below) + tuple(allocator_arg_t, const Alloc& a, tuple&); // C++23 template explicit(see-below) tuple(allocator_arg_t, const Alloc& a, const tuple&); // constexpr in C++20 template explicit(see-below) tuple(allocator_arg_t, const Alloc& a, tuple&&); // constexpr in C++20 + template + constexpr explicit(see-below) + tuple(allocator_arg_t, const Alloc& a, const tuple&&); // C++23 + template + constexpr explicit(see-below) + tuple(allocator_arg_t, const Alloc& a, pair&); // C++23 template explicit(see-below) tuple(allocator_arg_t, const Alloc& a, const pair&); // constexpr in C++20 template explicit(see-below) tuple(allocator_arg_t, const Alloc& a, pair&&); // constexpr in C++20 + template + constexpr explicit(see-below) + tuple(allocator_arg_t, const Alloc& a, const pair&&); // C++23 tuple& operator=(const tuple&); // constexpr in C++20 + constexpr const tuple& operator=(const tuple&) const; // C++23 tuple& operator=(tuple&&) noexcept(is_nothrow_move_assignable_v && ...); // constexpr in C++20 + constexpr const tuple& operator=(tuple&&) const; // C++23 template tuple& operator=(const tuple&); // constexpr in C++20 + template + constexpr const tuple& operator=(const tuple&) const; // C++23 template tuple& 
operator=(tuple&&); // constexpr in C++20 + template + constexpr const tuple& operator=(tuple&&) const; // C++23 template tuple& operator=(const pair&); // iff sizeof...(T) == 2 // constexpr in C++20 + template + constexpr const tuple& operator=(const pair&) const; // iff sizeof...(Types) == 2 // C++23 template tuple& operator=(pair&&); // iff sizeof...(T) == 2 // constexpr in C++20 + template + constexpr const tuple& operator=(pair&&) const; // iff sizeof...(Types) == 2 // C++23 template tuple& operator=(array const&) // iff sizeof...(T) == N, EXTENSION @@ -71,6 +103,7 @@ public: tuple& operator=(array&&) // iff sizeof...(T) == N, EXTENSION void swap(tuple&) noexcept(AND(swap(declval(), declval())...)); // constexpr in C++20 + constexpr void swap(const tuple&) const noexcept(see-below); // C++23 }; @@ -161,6 +194,9 @@ template void swap(tuple& x, tuple& y) noexcept(noexcept(x.swap(y))); +template + constexpr void swap(const tuple& x, const tuple& y) noexcept(see-below); // C++23 + } // std */ @@ -210,6 +246,13 @@ void swap(__tuple_leaf<_Ip, _Hp, _Ep>& __x, __tuple_leaf<_Ip, _Hp, _Ep>& __y) swap(__x.get(), __y.get()); } +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 +void swap(const __tuple_leaf<_Ip, _Hp, _Ep>& __x, const __tuple_leaf<_Ip, _Hp, _Ep>& __y) + _NOEXCEPT_(__is_nothrow_swappable::value) { + swap(__x.get(), __y.get()); +} + template class __tuple_leaf { @@ -298,6 +341,12 @@ public: return 0; } + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 + int swap(const __tuple_leaf& __t) const _NOEXCEPT_(__is_nothrow_swappable::value) { + _VSTD::swap(*this, __t); + return 0; + } + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 _Hp& get() _NOEXCEPT {return __value_;} _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 const _Hp& get() const _NOEXCEPT {return __value_;} }; @@ -364,6 +413,12 @@ public: return 0; } + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 + int swap(const __tuple_leaf& __rhs) const _NOEXCEPT_(__is_nothrow_swappable::value) { + _VSTD::swap(*this, __rhs); + return 0; + } + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 _Hp& get() _NOEXCEPT {return static_cast<_Hp&>(*this);} _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 const _Hp& get() const _NOEXCEPT {return static_cast(*this);} }; @@ -454,6 +509,13 @@ struct _LIBCPP_DECLSPEC_EMPTY_BASES __tuple_impl<__tuple_indices<_Indx...>, _Tp. 
{ _VSTD::__swallow(__tuple_leaf<_Indx, _Tp>::swap(static_cast<__tuple_leaf<_Indx, _Tp>&>(__t))...); } + + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 + void swap(const __tuple_impl& __t) const + _NOEXCEPT_(__all<__is_nothrow_swappable::value...>::value) + { + _VSTD::__swallow(__tuple_leaf<_Indx, _Tp>::swap(static_cast&>(__t))...); + } }; template @@ -689,6 +751,7 @@ public: template class _And = _And, __enable_if_t< _And...>::value , int> = 0> + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17 tuple(allocator_arg_t, const _Alloc& __alloc, const tuple& __t) : __base_(allocator_arg_t(), __alloc, __t) { } @@ -696,30 +759,39 @@ public: template class _And = _And, __enable_if_t< _And...>::value , int> = 0> + _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17 tuple(allocator_arg_t, const _Alloc& __alloc, tuple&& __t) : __base_(allocator_arg_t(), __alloc, _VSTD::move(__t)) { } // tuple(const tuple&) constructors (including allocator_arg_t variants) - template - struct _EnableCopyFromOtherTuple : _And< - _Not, tuple<_Up...> > >, - _Lazy<_Or, - _BoolConstant, + + template , class = void> + struct _EnableCtorFromUTypesTuple : false_type {}; + + template + struct _EnableCtorFromUTypesTuple<_OtherTuple, tuple<_Up...>, + // the length of the packs needs to checked first otherwise the 2 packs cannot be expanded simultaneously below + __enable_if_t> : _And< + // the two conditions below are not in spec. The purpose is to disable the UTypes Ctor when copy/move Ctor can work. + // Otherwise, is_constructible can trigger hard error in those cases https://godbolt.org/z/M94cGdKcE + _Not >, + _Not >, + is_constructible<_Tp, __copy_cvref_t<_OtherTuple, _Up> >..., + _Lazy<_Or, _BoolConstant, // _Tp and _Up are 1-element packs - the pack expansions look // weird to avoid tripping up the type traits in degenerate cases _Lazy<_And, - _Not&, _Tp> >..., - _Not&> >... + _Not >..., + _Not >..., + _Not >... > - >, - is_constructible<_Tp, const _Up&>... - > { }; + > + > {}; template , - _EnableCopyFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&>, is_convertible... // explicit check >::value , int> = 0> @@ -731,8 +803,7 @@ public: template , - _EnableCopyFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&>, _Not<_Lazy<_And, is_convertible...> > // explicit check >::value , int> = 0> @@ -744,8 +815,7 @@ public: template , - _EnableCopyFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&>, is_convertible... 
// explicit check >::value , int> = 0> @@ -756,8 +826,7 @@ public: template , - _EnableCopyFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&>, _Not<_Lazy<_And, is_convertible...> > // explicit check >::value , int> = 0> @@ -766,26 +835,27 @@ public: : __base_(allocator_arg_t(), __a, __t) { } +#if _LIBCPP_STD_VER > 20 + // tuple(tuple&) constructors (including allocator_arg_t variants) + + template &>::value>* = nullptr> + _LIBCPP_HIDE_FROM_ABI constexpr + explicit(!(is_convertible_v<_Up&, _Tp> && ...)) + tuple(tuple<_Up...>& __t) : __base_(__t) {} + + template &>::value>* = nullptr> + _LIBCPP_HIDE_FROM_ABI constexpr + explicit(!(is_convertible_v<_Up&, _Tp> && ...)) + tuple(allocator_arg_t, const _Alloc& __alloc, tuple<_Up...>& __t) : __base_(allocator_arg_t(), __alloc, __t) {} +#endif // _LIBCPP_STD_VER > 20 + // tuple(tuple&&) constructors (including allocator_arg_t variants) - template - struct _EnableMoveFromOtherTuple : _And< - _Not, tuple<_Up...> > >, - _Lazy<_Or, - _BoolConstant, - // _Tp and _Up are 1-element packs - the pack expansions look - // weird to avoid tripping up the type traits in degenerate cases - _Lazy<_And, - _Not, _Tp> >..., - _Not > >... - > - >, - is_constructible<_Tp, _Up>... - > { }; template , - _EnableMoveFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&&>, is_convertible<_Up, _Tp>... // explicit check >::value , int> = 0> @@ -797,8 +867,7 @@ public: template , - _EnableMoveFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&&>, _Not<_Lazy<_And, is_convertible<_Up, _Tp>...> > // explicit check >::value , int> = 0> @@ -810,8 +879,7 @@ public: template , - _EnableMoveFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&&>, is_convertible<_Up, _Tp>... // explicit check >::value , int> = 0> @@ -822,8 +890,7 @@ public: template , - _EnableMoveFromOtherTuple<_Up...>, + _EnableCtorFromUTypesTuple&&>, _Not<_Lazy<_And, is_convertible<_Up, _Tp>...> > // explicit check >::value , int> = 0> @@ -832,57 +899,77 @@ public: : __base_(allocator_arg_t(), __a, _VSTD::move(__t)) { } +#if _LIBCPP_STD_VER > 20 + // tuple(const tuple&&) constructors (including allocator_arg_t variants) + + template &&>::value>* = nullptr> + _LIBCPP_HIDE_FROM_ABI constexpr + explicit(!(is_convertible_v && ...)) + tuple(const tuple<_Up...>&& __t) : __base_(std::move(__t)) {} + + template &&>::value>* = nullptr> + _LIBCPP_HIDE_FROM_ABI constexpr + explicit(!(is_convertible_v && ...)) + tuple(allocator_arg_t, const _Alloc& __alloc, const tuple<_Up...>&& __t) + : __base_(allocator_arg_t(), __alloc, std::move(__t)) {} +#endif // _LIBCPP_STD_VER > 20 + // tuple(const pair&) constructors (including allocator_arg_t variants) - template - struct _EnableImplicitCopyFromPair : _And< - is_constructible<_FirstType<_DependentTp...>, const _Up1&>, - is_constructible<_SecondType<_DependentTp...>, const _Up2&>, - is_convertible >, // explicit check - is_convertible > - > { }; - template - struct _EnableExplicitCopyFromPair : _And< - is_constructible<_FirstType<_DependentTp...>, const _Up1&>, - is_constructible<_SecondType<_DependentTp...>, const _Up2&>, - _Not > >, // explicit check - _Not > > - > { }; + template