From a79ac14fa68297f9888bc70a10df5ed9b8864e38 Mon Sep 17 00:00:00 2001
From: David Blaikie
Date: Fri, 27 Feb 2015 21:17:42 +0000
Subject: [PATCH] [opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786. A similar migration script
can be used to update test cases, though a few more test case
improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
---
 llvm/lib/AsmParser/LLParser.cpp | 10 +- llvm/lib/IR/AsmWriter.cpp | 6 +- .../BasicAA/2003-02-26-AccessSizeTest.ll | 6 +- .../Analysis/BasicAA/2003-04-22-GEPProblem.ll | 4 +- .../BasicAA/2003-05-21-GEP-Problem.ll | 2 +- .../Analysis/BasicAA/2003-06-01-AliasCrash.ll | 6 +- .../BasicAA/2003-09-19-LocalArgument.ll | 4 +- .../BasicAA/2006-03-03-BadArraySubscript.ll | 4 +- .../2007-01-13-BasePointerBadNoAlias.ll | 4 +- .../BasicAA/2007-08-05-GetOverloadedModRef.ll | 6 +- .../BasicAA/2007-10-24-ArgumentsGlobals.ll | 2 +- .../Analysis/BasicAA/2007-11-05-SizeCrash.ll | 4 +- .../BasicAA/2007-12-08-OutOfBoundsCrash.ll | 4 +- .../BasicAA/2008-06-02-GEPTailCrash.ll | 2 +- .../Analysis/BasicAA/2008-11-23-NoaliasRet.ll | 2 +- .../Analysis/BasicAA/2009-03-04-GEPNoalias.ll | 4 +- .../BasicAA/2009-10-13-AtomicModRef.ll | 4 +- .../BasicAA/2009-10-13-GEP-BaseNoAlias.ll | 4 +- .../2010-09-15-GEP-SignedArithmetic.ll | 4 +- .../BasicAA/2014-03-18-Maxlookup-reached.ll | 2 +- .../test/Analysis/BasicAA/aligned-overread.ll | 4 +- .../BasicAA/args-rets-allocas-loads.ll | 4 +- llvm/test/Analysis/BasicAA/byval.ll | 2 +- llvm/test/Analysis/BasicAA/cas.ll | 4 +- llvm/test/Analysis/BasicAA/dag.ll | 2 +- llvm/test/Analysis/BasicAA/featuretest.ll | 28 +- .../BasicAA/full-store-partial-alias.ll | 4 +- llvm/test/Analysis/BasicAA/gcsetest.ll | 10 +- llvm/test/Analysis/BasicAA/gep-alias.ll | 50 +- llvm/test/Analysis/BasicAA/global-size.ll | 12 +- llvm/test/Analysis/BasicAA/invariant_load.ll | 6 +- llvm/test/Analysis/BasicAA/memset_pattern.ll | 2 +- llvm/test/Analysis/BasicAA/modref.ll | 26 +- .../test/Analysis/BasicAA/must-and-partial.ll | 4 +- llvm/test/Analysis/BasicAA/no-escape-call.ll | 4 +- llvm/test/Analysis/BasicAA/noalias-bugs.ll | 2 +- llvm/test/Analysis/BasicAA/noalias-param.ll | 4 +- llvm/test/Analysis/BasicAA/nocapture.ll | 10 +- llvm/test/Analysis/BasicAA/phi-aa.ll | 12 +- llvm/test/Analysis/BasicAA/phi-spec-order.ll | 8 +- llvm/test/Analysis/BasicAA/phi-speculation.ll | 12 +- llvm/test/Analysis/BasicAA/pr18573.ll | 4 +- llvm/test/Analysis/BasicAA/store-promote.ll | 10 +- llvm/test/Analysis/BasicAA/tailcall-modref.ll | 4 +- .../test/Analysis/BasicAA/underlying-value.ll | 4 +- llvm/test/Analysis/BasicAA/zext.ll | 4 +- .../test/Analysis/BlockFrequencyInfo/basic.ll | 2 +- .../Analysis/BranchProbabilityInfo/basic.ll | 6 +- .../Analysis/BranchProbabilityInfo/loop.ll | 16 +- .../Analysis/BranchProbabilityInfo/pr18705.ll | 8 +- .../full-store-partial-alias.ll | 4 +- .../CFLAliasAnalysis/gep-signed-arithmetic.ll | 4 +- .../CFLAliasAnalysis/multilevel-combine.ll | 2 +- .../Analysis/CFLAliasAnalysis/multilevel.ll | 4 +- .../CFLAliasAnalysis/must-and-partial.ll | 8 +- 
llvm/test/Analysis/CostModel/AArch64/store.ll | 4 +- .../Analysis/CostModel/ARM/insertelement.ll | 12 +- .../Analysis/CostModel/PowerPC/load_store.ll | 16 +- .../Analysis/CostModel/X86/intrinsic-cost.ll | 6 +- .../test/Analysis/CostModel/X86/load_store.ll | 34 +- llvm/test/Analysis/CostModel/X86/loop_v2.ll | 6 +- .../Analysis/CostModel/X86/vectorized-loop.ll | 8 +- .../Delinearization/gcd_multiply_expr.ll | 28 +- .../test/Analysis/Delinearization/himeno_1.ll | 12 +- .../test/Analysis/Delinearization/himeno_2.ll | 12 +- .../Delinearization/multidim_only_ivs_2d.ll | 4 +- llvm/test/Analysis/Delinearization/undef.ll | 2 +- .../Analysis/DependenceAnalysis/Banerjee.ll | 26 +- .../Analysis/DependenceAnalysis/Coupled.ll | 32 +- .../Analysis/DependenceAnalysis/ExactRDIV.ll | 26 +- .../Analysis/DependenceAnalysis/ExactSIV.ll | 28 +- llvm/test/Analysis/DependenceAnalysis/GCD.ll | 20 +- .../Analysis/DependenceAnalysis/Invariant.ll | 4 +- .../NonCanonicalizedSubscript.ll | 2 +- .../DependenceAnalysis/Preliminary.ll | 24 +- .../DependenceAnalysis/Propagating.ll | 20 +- .../DependenceAnalysis/Separability.ll | 8 +- .../Analysis/DependenceAnalysis/StrongSIV.ll | 22 +- .../DependenceAnalysis/SymbolicRDIV.ll | 14 +- .../DependenceAnalysis/SymbolicSIV.ll | 18 +- .../DependenceAnalysis/WeakCrossingSIV.ll | 14 +- .../DependenceAnalysis/WeakZeroDstSIV.ll | 14 +- .../DependenceAnalysis/WeakZeroSrcSIV.ll | 14 +- llvm/test/Analysis/DependenceAnalysis/ZIV.ll | 6 +- .../GlobalsModRef/2008-09-03-ReadGlobals.ll | 2 +- llvm/test/Analysis/GlobalsModRef/aliastest.ll | 2 +- .../GlobalsModRef/chaining-analysis.ll | 2 +- .../Analysis/GlobalsModRef/indirect-global.ll | 8 +- .../test/Analysis/GlobalsModRef/modreftest.ll | 2 +- llvm/test/Analysis/GlobalsModRef/pr12351.ll | 6 +- .../Analysis/GlobalsModRef/volatile-instrs.ll | 2 +- llvm/test/Analysis/LazyCallGraph/basic.ll | 8 +- .../backward-dep-different-types.ll | 8 +- .../unsafe-and-rt-checks-no-dbg.ll | 12 +- .../unsafe-and-rt-checks.ll | 14 +- .../memdep_requires_dominator_tree.ll | 2 +- .../2008-07-12-UnneededSelect1.ll | 4 +- .../ScalarEvolution/2008-12-08-FiniteSGE.ll | 2 +- .../2009-01-02-SignedNegativeStride.ll | 4 +- .../2009-07-04-GroupConstantsWidthMismatch.ll | 4 +- .../2012-03-26-LoadConstant.ll | 6 +- .../avoid-infinite-recursion-0.ll | 2 +- .../Analysis/ScalarEvolution/avoid-smax-0.ll | 4 +- .../Analysis/ScalarEvolution/avoid-smax-1.ll | 6 +- .../ScalarEvolution/infer-prestart-no-wrap.ll | 6 +- .../load-with-range-metadata.ll | 4 +- llvm/test/Analysis/ScalarEvolution/load.ll | 8 +- .../ScalarEvolution/max-trip-count.ll | 2 +- .../Analysis/ScalarEvolution/min-max-exprs.ll | 2 +- .../ScalarEvolution/nsw-offset-assume.ll | 6 +- .../Analysis/ScalarEvolution/nsw-offset.ll | 6 +- llvm/test/Analysis/ScalarEvolution/nsw.ll | 6 +- llvm/test/Analysis/ScalarEvolution/pr22179.ll | 2 +- llvm/test/Analysis/ScalarEvolution/pr22674.ll | 6 +- llvm/test/Analysis/ScalarEvolution/scev-aa.ll | 18 +- .../ScalarEvolution/scev-prestart-nowrap.ll | 2 +- .../Analysis/ScalarEvolution/sext-iv-0.ll | 2 +- .../Analysis/ScalarEvolution/sext-iv-1.ll | 8 +- .../Analysis/ScalarEvolution/sext-iv-2.ll | 2 +- llvm/test/Analysis/ScalarEvolution/sle.ll | 2 +- .../Analysis/ScalarEvolution/trip-count11.ll | 4 +- .../Analysis/ScalarEvolution/trip-count12.ll | 2 +- .../Analysis/ScalarEvolution/trip-count4.ll | 2 +- .../Analysis/ScalarEvolution/trip-count5.ll | 6 +- .../Analysis/ScalarEvolution/trip-count6.ll | 2 +- .../Analysis/ScalarEvolution/trip-count7.ll | 10 +- 
.../ScalarEvolution/zext-signed-addrec.ll | 6 +- .../Analysis/ScopedNoAliasAA/basic-domains.ll | 24 +- llvm/test/Analysis/ScopedNoAliasAA/basic.ll | 12 +- llvm/test/Analysis/ScopedNoAliasAA/basic2.ll | 16 +- .../TypeBasedAliasAnalysis/PR17620.ll | 8 +- .../TypeBasedAliasAnalysis/aliastest.ll | 16 +- .../argument-promotion.ll | 4 +- .../Analysis/TypeBasedAliasAnalysis/dse.ll | 16 +- .../TypeBasedAliasAnalysis/dynamic-indices.ll | 20 +- .../gvn-nonlocal-type-mismatch.ll | 16 +- .../Analysis/TypeBasedAliasAnalysis/licm.ll | 10 +- .../TypeBasedAliasAnalysis/placement-tbaa.ll | 22 +- .../TypeBasedAliasAnalysis/precedence.ll | 4 +- .../Analysis/TypeBasedAliasAnalysis/sink.ll | 4 +- .../TypeBasedAliasAnalysis/tbaa-path.ll | 106 +-- .../ValueTracking/memory-dereferenceable.ll | 10 +- llvm/test/Assembler/2002-04-29-NameBinding.ll | 2 +- .../Assembler/2002-08-19-BytecodeReader.ll | 4 +- .../2003-08-20-ConstantExprGEP-Fold.ll | 2 +- llvm/test/Assembler/2004-06-07-VerifierBug.ll | 2 +- .../Assembler/2007-01-05-Cmp-ConstExpr.ll | 2 +- llvm/test/Assembler/2007-04-20-AlignedLoad.ll | 2 +- .../Assembler/2007-12-11-AddressSpaces.ll | 6 +- ...-02-05-FunctionLocalMetadataBecomesNull.ll | 2 +- llvm/test/Assembler/align-inst-load.ll | 2 +- llvm/test/Assembler/align-inst.ll | 2 +- llvm/test/Assembler/atomic.ll | 8 +- llvm/test/Assembler/fast-math-flags.ll | 48 +- llvm/test/Assembler/half-constprop.ll | 4 +- llvm/test/Assembler/half-conv.ll | 2 +- llvm/test/Assembler/insertextractvalue.ll | 2 +- .../invalid-load-mismatched-explicit-type.ll | 6 + .../invalid-load-missing-explicit-type.ll | 6 + llvm/test/Assembler/numbered-values.ll | 2 +- llvm/test/Assembler/unnamed.ll | 2 +- llvm/test/Assembler/upgrade-loop-metadata.ll | 4 +- llvm/test/Assembler/uselistorder.ll | 2 +- llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll | 4 +- llvm/test/Bitcode/case-ranges-3.3.ll | 4 +- .../Bitcode/function-encoding-rel-operands.ll | 2 +- llvm/test/Bitcode/memInstructions.3.2.ll | 128 +-- llvm/test/Bitcode/metadata-2.ll | 2 +- llvm/test/Bitcode/upgrade-loop-metadata.ll | 4 +- llvm/test/Bitcode/use-list-order.ll | 6 +- .../test/CodeGen/AArch64/128bit_load_store.ll | 6 +- llvm/test/CodeGen/AArch64/PBQP-chain.ll | 48 +- .../CodeGen/AArch64/PBQP-coalesce-benefit.ll | 4 +- llvm/test/CodeGen/AArch64/PBQP-csr.ll | 18 +- llvm/test/CodeGen/AArch64/Redundantstore.ll | 2 +- llvm/test/CodeGen/AArch64/a57-csel.ll | 2 +- ...aarch64-2014-08-11-MachineCombinerCrash.ll | 2 +- .../aarch64-2014-12-02-combine-soften.ll | 2 +- .../AArch64/aarch64-a57-fp-load-balancing.ll | 88 +- ...arch64-address-type-promotion-assertion.ll | 2 +- .../AArch64/aarch64-address-type-promotion.ll | 4 +- llvm/test/CodeGen/AArch64/aarch64-be-bv.ll | 64 +- .../AArch64/aarch64-fix-cortex-a53-835769.ll | 58 +- llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll | 8 +- llvm/test/CodeGen/AArch64/aarch64-smull.ll | 96 +-- llvm/test/CodeGen/AArch64/addsub-shifted.ll | 20 +- llvm/test/CodeGen/AArch64/addsub.ll | 18 +- llvm/test/CodeGen/AArch64/addsub_ext.ll | 16 +- llvm/test/CodeGen/AArch64/alloca.ll | 2 +- llvm/test/CodeGen/AArch64/and-mask-removal.ll | 4 +- .../AArch64/arm64-2011-04-21-CPSRBug.ll | 2 +- .../AArch64/arm64-2011-10-18-LdStOptBug.ll | 2 +- .../arm64-2012-01-11-ComparisonDAGCrash.ll | 2 +- .../AArch64/arm64-2012-05-22-LdStOptBug.ll | 10 +- .../AArch64/arm64-2012-06-06-FPToUI.ll | 16 +- .../test/CodeGen/AArch64/arm64-abi-varargs.ll | 34 +- llvm/test/CodeGen/AArch64/arm64-abi.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-abi_align.ll | 52 +- .../AArch64/arm64-addr-mode-folding.ll | 36 
+- .../AArch64/arm64-addr-type-promotion.ll | 14 +- llvm/test/CodeGen/AArch64/arm64-addrmode.ll | 34 +- .../AArch64/arm64-alloc-no-stack-realign.ll | 4 +- .../arm64-alloca-frame-pointer-offset.ll | 10 +- .../CodeGen/AArch64/arm64-andCmpBrToTBZ.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-atomic-128.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-atomic.ll | 34 +- llvm/test/CodeGen/AArch64/arm64-basic-pic.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-bcc.ll | 4 +- .../AArch64/arm64-big-endian-bitconverts.ll | 168 ++-- .../AArch64/arm64-big-endian-varargs.ll | 8 +- .../AArch64/arm64-big-endian-vector-caller.ll | 168 ++-- .../CodeGen/AArch64/arm64-bitfield-extract.ll | 40 +- .../CodeGen/AArch64/arm64-blockaddress.ll | 2 +- .../CodeGen/AArch64/arm64-call-tailcalls.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-cast-opt.ll | 6 +- .../CodeGen/AArch64/arm64-ccmp-heuristics.ll | 74 +- llvm/test/CodeGen/AArch64/arm64-ccmp.ll | 2 +- .../AArch64/arm64-code-model-large-abs.ll | 8 +- .../arm64-collect-loh-garbage-crash.ll | 6 +- .../test/CodeGen/AArch64/arm64-collect-loh.ll | 10 +- .../AArch64/arm64-complex-copy-noneon.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-const-addr.ll | 6 +- .../CodeGen/AArch64/arm64-convert-v4f64.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-cse.ll | 4 +- .../arm64-dagcombiner-dead-indexed-load.ll | 2 +- .../AArch64/arm64-dagcombiner-load-slicing.ll | 18 +- llvm/test/CodeGen/AArch64/arm64-dup.ll | 16 +- .../test/CodeGen/AArch64/arm64-early-ifcvt.ll | 6 +- .../test/CodeGen/AArch64/arm64-elf-globals.ll | 16 +- llvm/test/CodeGen/AArch64/arm64-ext.ll | 46 +- llvm/test/CodeGen/AArch64/arm64-extend.ll | 2 +- .../AArch64/arm64-extload-knownzero.ll | 2 +- .../AArch64/arm64-fast-isel-addr-offset.ll | 8 +- .../CodeGen/AArch64/arm64-fast-isel-br.ll | 18 +- .../CodeGen/AArch64/arm64-fast-isel-call.ll | 4 +- .../AArch64/arm64-fast-isel-conversion.ll | 26 +- .../CodeGen/AArch64/arm64-fast-isel-gv.ll | 4 +- .../AArch64/arm64-fast-isel-indirectbr.ll | 6 +- .../CodeGen/AArch64/arm64-fast-isel-ret.ll | 10 +- llvm/test/CodeGen/AArch64/arm64-fast-isel.ll | 10 +- .../arm64-fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-fmuladd.ll | 42 +- .../CodeGen/AArch64/arm64-fold-address.ll | 20 +- llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll | 6 +- .../CodeGen/AArch64/arm64-fp128-folding.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-fp128.ll | 48 +- .../AArch64/arm64-i16-subreg-extract.ll | 2 +- .../CodeGen/AArch64/arm64-indexed-memory.ll | 62 +- .../AArch64/arm64-indexed-vector-ldst-2.ll | 4 +- .../AArch64/arm64-indexed-vector-ldst.ll | 124 +-- llvm/test/CodeGen/AArch64/arm64-inline-asm.ll | 10 +- llvm/test/CodeGen/AArch64/arm64-ld1.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-ldp.ll | 64 +- llvm/test/CodeGen/AArch64/arm64-ldur.ll | 14 +- .../AArch64/arm64-misched-basic-A53.ll | 24 +- .../AArch64/arm64-misched-basic-A57.ll | 24 +- .../AArch64/arm64-neon-simd-ldst-one.ll | 52 +- .../AArch64/arm64-patchpoint-scratch-regs.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-patchpoint.ll | 6 +- .../CodeGen/AArch64/arm64-pic-local-symbol.ll | 2 +- .../CodeGen/AArch64/arm64-platform-reg.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-prefetch.ll | 26 +- llvm/test/CodeGen/AArch64/arm64-redzone.ll | 6 +- .../arm64-register-offset-addressing.ll | 24 +- .../AArch64/arm64-regress-interphase-shift.ll | 2 +- .../CodeGen/AArch64/arm64-return-vector.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-rev.ll | 36 +- llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-scvt.ll | 110 +-- 
.../AArch64/arm64-sitofp-combine-chains.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-spill-lr.ll | 42 +- llvm/test/CodeGen/AArch64/arm64-spill.ll | 2 +- .../CodeGen/AArch64/arm64-stack-no-frame.ll | 4 +- .../CodeGen/AArch64/arm64-strict-align.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll | 2 +- .../AArch64/arm64-tls-dynamic-together.ll | 2 +- .../CodeGen/AArch64/arm64-tls-dynamics.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-tls-execs.ll | 4 +- .../AArch64/arm64-triv-disjoint-mem-access.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-trn.ll | 40 +- .../test/CodeGen/AArch64/arm64-trunc-store.ll | 6 +- .../CodeGen/AArch64/arm64-unaligned_ldst.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-uzp.ll | 32 +- llvm/test/CodeGen/AArch64/arm64-vabs.ll | 288 +++---- llvm/test/CodeGen/AArch64/arm64-vadd.ll | 248 +++--- llvm/test/CodeGen/AArch64/arm64-vbitwise.ll | 18 +- llvm/test/CodeGen/AArch64/arm64-vcmp.ll | 52 +- llvm/test/CodeGen/AArch64/arm64-vcnt.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-vcvt.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-vector-imm.ll | 8 +- .../test/CodeGen/AArch64/arm64-vector-ldst.ll | 128 +-- llvm/test/CodeGen/AArch64/arm64-vext.ll | 176 ++-- llvm/test/CodeGen/AArch64/arm64-vhadd.ll | 96 +-- llvm/test/CodeGen/AArch64/arm64-vhsub.ll | 48 +- llvm/test/CodeGen/AArch64/arm64-vmax.ll | 264 +++--- llvm/test/CodeGen/AArch64/arm64-vmul.ll | 446 +++++----- llvm/test/CodeGen/AArch64/arm64-volatile.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-vqadd.ll | 112 +-- llvm/test/CodeGen/AArch64/arm64-vqsub.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vshift.ll | 608 +++++++------- llvm/test/CodeGen/AArch64/arm64-vshr.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-vsqrt.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vsra.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vsub.ll | 120 +-- .../CodeGen/AArch64/arm64-weak-reference.ll | 2 +- .../AArch64/arm64-zextload-unscaled.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-zip.ll | 32 +- .../CodeGen/AArch64/assertion-rc-mismatch.ll | 2 +- .../AArch64/atomic-ops-not-barriers.ll | 2 +- llvm/test/CodeGen/AArch64/atomic-ops.ll | 14 +- llvm/test/CodeGen/AArch64/basic-pic.ll | 8 +- .../test/CodeGen/AArch64/bitfield-insert-0.ll | 4 +- llvm/test/CodeGen/AArch64/bitfield-insert.ll | 36 +- llvm/test/CodeGen/AArch64/bitfield.ll | 8 +- llvm/test/CodeGen/AArch64/blockaddress.ll | 2 +- llvm/test/CodeGen/AArch64/bool-loads.ll | 8 +- llvm/test/CodeGen/AArch64/breg.ll | 2 +- llvm/test/CodeGen/AArch64/callee-save.ll | 64 +- llvm/test/CodeGen/AArch64/cmpwithshort.ll | 6 +- .../CodeGen/AArch64/code-model-large-abs.ll | 8 +- .../AArch64/combine-comparisons-by-cse.ll | 84 +- llvm/test/CodeGen/AArch64/compare-branch.ll | 8 +- .../CodeGen/AArch64/complex-copy-noneon.ll | 4 +- .../test/CodeGen/AArch64/complex-int-to-fp.ll | 2 +- .../CodeGen/AArch64/dag-combine-invaraints.ll | 8 +- llvm/test/CodeGen/AArch64/dp-3source.ll | 4 +- llvm/test/CodeGen/AArch64/dp1.ll | 28 +- llvm/test/CodeGen/AArch64/dp2.ll | 54 +- llvm/test/CodeGen/AArch64/eliminate-trunc.ll | 4 +- llvm/test/CodeGen/AArch64/f16-convert.ll | 20 +- .../AArch64/fast-isel-addressing-modes.ll | 90 +- .../test/CodeGen/AArch64/fast-isel-int-ext.ll | 84 +- .../CodeGen/AArch64/fast-isel-int-ext2.ll | 60 +- .../CodeGen/AArch64/fast-isel-int-ext3.ll | 20 +- llvm/test/CodeGen/AArch64/floatdp_1source.ll | 10 +- llvm/test/CodeGen/AArch64/floatdp_2source.ll | 4 +- llvm/test/CodeGen/AArch64/fp128-folding.ll | 2 +- .../test/CodeGen/AArch64/fp16-instructions.ll | 2 +- .../CodeGen/AArch64/fp16-v4-instructions.ll | 2 +- .../CodeGen/AArch64/fp16-v8-instructions.ll | 2 
+- .../CodeGen/AArch64/fp16-vector-load-store.ll | 12 +- llvm/test/CodeGen/AArch64/fpimm.ll | 4 +- llvm/test/CodeGen/AArch64/free-zext.ll | 4 +- llvm/test/CodeGen/AArch64/func-argpassing.ll | 16 +- llvm/test/CodeGen/AArch64/func-calls.ll | 12 +- llvm/test/CodeGen/AArch64/funcptr_cast.ll | 2 +- llvm/test/CodeGen/AArch64/ghc-cc.ll | 6 +- llvm/test/CodeGen/AArch64/global-alignment.ll | 10 +- llvm/test/CodeGen/AArch64/global-merge-4.ll | 20 +- llvm/test/CodeGen/AArch64/half.ll | 8 +- llvm/test/CodeGen/AArch64/i1-contents.ll | 4 +- llvm/test/CodeGen/AArch64/ldst-opt.ll | 90 +- llvm/test/CodeGen/AArch64/ldst-regoffset.ll | 78 +- llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll | 50 +- llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll | 64 +- .../CodeGen/AArch64/literal_pools_float.ll | 4 +- llvm/test/CodeGen/AArch64/local_vars.ll | 2 +- .../CodeGen/AArch64/logical_shifted_reg.ll | 12 +- llvm/test/CodeGen/AArch64/machine_cse.ll | 10 +- .../test/CodeGen/AArch64/neon-fpround_f128.ll | 4 +- .../AArch64/neon-truncStore-extLoad.ll | 6 +- llvm/test/CodeGen/AArch64/nzcv-save.ll | 4 +- llvm/test/CodeGen/AArch64/paired-load.ll | 4 +- llvm/test/CodeGen/AArch64/pic-eh-stubs.ll | 2 +- llvm/test/CodeGen/AArch64/ragreedy-csr.ll | 50 +- .../CodeGen/AArch64/regress-tail-livereg.ll | 2 +- .../CodeGen/AArch64/regress-tblgen-chains.ll | 4 +- .../AArch64/regress-w29-reserved-with-fp.ll | 18 +- llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll | 32 +- llvm/test/CodeGen/AArch64/sibling-call.ll | 2 +- .../AArch64/stack-guard-remat-bitcast.ll | 2 +- llvm/test/CodeGen/AArch64/tbz-tbnz.ll | 2 +- llvm/test/CodeGen/AArch64/tst-br.ll | 4 +- .../test/CodeGen/ARM/2006-11-10-CycleInDAG.ll | 2 +- .../CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 22 +- .../CodeGen/ARM/2007-03-07-CombinerCrash.ll | 2 +- .../test/CodeGen/ARM/2007-03-13-InstrSched.ll | 12 +- .../ARM/2007-03-21-JoinIntervalsCrash.ll | 4 +- .../ARM/2007-03-27-RegScavengerAssert.ll | 4 +- .../ARM/2007-04-02-RegScavengerAssert.ll | 4 +- .../CodeGen/ARM/2007-04-03-UndefinedSymbol.ll | 16 +- .../CodeGen/ARM/2007-04-30-CombinerCrash.ll | 10 +- .../ARM/2007-05-03-BadPostIndexedLd.ll | 6 +- .../CodeGen/ARM/2007-05-07-tailmerge-1.ll | 6 +- .../CodeGen/ARM/2007-05-09-tailmerge-2.ll | 8 +- .../CodeGen/ARM/2007-05-22-tailmerge-3.ll | 8 +- llvm/test/CodeGen/ARM/2007-08-15-ReuseBug.ll | 6 +- .../ARM/2008-02-04-LocalRegAllocBug.ll | 2 +- .../CodeGen/ARM/2008-03-05-SxtInRegBug.ll | 2 +- .../ARM/2008-03-07-RegScavengerAssert.ll | 2 +- .../CodeGen/ARM/2008-04-10-ScavengerAssert.ll | 12 +- .../ARM/2008-05-19-LiveIntervalsBug.ll | 4 +- .../ARM/2008-07-24-CodeGenPrepCrash.ll | 2 +- .../CodeGen/ARM/2008-08-07-AsmPrintBug.ll | 2 +- .../test/CodeGen/ARM/2009-02-16-SpillerBug.ll | 2 +- .../ARM/2009-02-22-SoftenFloatVaArg.ll | 2 +- .../test/CodeGen/ARM/2009-02-27-SpillerBug.ll | 4 +- .../test/CodeGen/ARM/2009-03-07-SpillerBug.ll | 2 +- .../CodeGen/ARM/2009-04-06-AsmModifier.ll | 6 +- .../CodeGen/ARM/2009-04-08-AggregateAddr.ll | 6 +- .../test/CodeGen/ARM/2009-04-08-FloatUndef.ll | 2 +- .../CodeGen/ARM/2009-04-09-RegScavengerAsm.ll | 2 +- .../ARM/2009-05-11-CodePlacementCrash.ll | 2 +- .../CodeGen/ARM/2009-06-04-MissingLiveIn.ll | 2 +- .../CodeGen/ARM/2009-06-22-CoalescerBug.ll | 4 +- .../ARM/2009-06-30-RegScavengerAssert.ll | 10 +- .../ARM/2009-06-30-RegScavengerAssert2.ll | 6 +- .../ARM/2009-06-30-RegScavengerAssert3.ll | 10 +- .../ARM/2009-06-30-RegScavengerAssert4.ll | 10 +- .../ARM/2009-06-30-RegScavengerAssert5.ll | 2 +- .../test/CodeGen/ARM/2009-07-01-CommuteBug.ll | 10 +- 
.../CodeGen/ARM/2009-07-18-RewriterBug.ll | 442 +++++----- .../CodeGen/ARM/2009-07-22-ScavengerAssert.ll | 2 +- .../CodeGen/ARM/2009-07-22-SchedulerAssert.ll | 2 +- .../CodeGen/ARM/2009-07-29-VFP3Registers.ll | 2 +- .../ARM/2009-08-02-RegScavengerAssert-Neon.ll | 8 +- .../2009-08-15-RegScavenger-EarlyClobber.ll | 2 +- .../test/CodeGen/ARM/2009-08-21-PostRAKill.ll | 8 +- llvm/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll | 16 +- .../CodeGen/ARM/2009-08-31-TwoRegShuffle.ll | 2 +- llvm/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll | 4 +- .../CodeGen/ARM/2009-09-13-InvalidSubreg.ll | 6 +- .../CodeGen/ARM/2009-09-13-InvalidSuperReg.ll | 14 +- .../ARM/2009-09-23-LiveVariablesBug.ll | 2 +- .../CodeGen/ARM/2009-09-24-spill-align.ll | 2 +- .../CodeGen/ARM/2009-10-02-NEONSubregsBug.ll | 4 +- .../CodeGen/ARM/2009-11-02-NegativeLane.ll | 2 +- .../ARM/2009-11-07-SubRegAsmPrinting.ll | 8 +- .../CodeGen/ARM/2009-11-13-ScavengerAssert.ll | 4 +- .../ARM/2009-11-13-ScavengerAssert2.ll | 30 +- .../CodeGen/ARM/2009-11-13-VRRewriterCrash.ll | 22 +- .../CodeGen/ARM/2010-03-04-eabi-fp-spill.ll | 16 +- .../CodeGen/ARM/2010-03-04-stm-undef-addr.ll | 6 +- .../CodeGen/ARM/2010-05-17-FastAllocCrash.ll | 6 +- .../CodeGen/ARM/2010-05-18-LocalAllocCrash.ll | 2 +- .../CodeGen/ARM/2010-05-18-PostIndexBug.ll | 2 +- llvm/test/CodeGen/ARM/2010-05-19-Shuffles.ll | 2 +- .../CodeGen/ARM/2010-05-21-BuildVector.ll | 10 +- .../CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll | 2 +- .../CodeGen/ARM/2010-06-21-LdStMultipleBug.ll | 18 +- .../CodeGen/ARM/2010-06-21-nondarwin-tc.ll | 20 +- .../ARM/2010-06-25-Thumb2ITInvalidIterator.ll | 2 +- .../CodeGen/ARM/2010-06-29-SubregImpDefs.ll | 2 +- .../CodeGen/ARM/2010-07-26-GlobalMerge.ll | 10 +- llvm/test/CodeGen/ARM/2010-08-04-EHCrash.ll | 12 +- .../CodeGen/ARM/2010-08-04-StackVariable.ll | 8 +- .../ARM/2010-11-15-SpillEarlyClobber.ll | 24 +- llvm/test/CodeGen/ARM/2010-12-08-tpsoft.ll | 2 +- llvm/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll | 4 +- .../CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll | 10 +- .../CodeGen/ARM/2011-02-07-AntidepClobber.ll | 10 +- .../CodeGen/ARM/2011-03-10-DAGCombineCrash.ll | 2 +- .../CodeGen/ARM/2011-03-15-LdStMultipleBug.ll | 6 +- llvm/test/CodeGen/ARM/2011-04-07-schediv.ll | 2 +- .../CodeGen/ARM/2011-04-11-MachineLICMBug.ll | 2 +- .../CodeGen/ARM/2011-04-12-FastRegAlloc.ll | 2 +- .../test/CodeGen/ARM/2011-04-26-SchedTweak.ll | 8 +- .../CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll | 10 +- .../test/CodeGen/ARM/2011-08-29-SchedCycle.ll | 2 +- .../CodeGen/ARM/2011-08-29-ldr_pre_imm.ll | 2 +- .../ARM/2011-09-09-OddVectorDivision.ll | 8 +- .../CodeGen/ARM/2011-09-28-CMovCombineBug.ll | 2 +- .../2011-10-26-ExpandUnalignedLoadCrash.ll | 4 +- .../ARM/2011-11-07-PromoteVectorLoadStore.ll | 8 +- .../ARM/2011-11-09-BitcastVectorDouble.ll | 2 +- .../2011-11-09-IllegalVectorFPIntConvert.ll | 8 +- .../CodeGen/ARM/2011-11-14-EarlyClobber.ll | 14 +- .../CodeGen/ARM/2011-11-28-DAGCombineBug.ll | 16 +- .../ARM/2011-11-29-128bitArithmetics.ll | 22 +- .../CodeGen/ARM/2011-11-30-MergeAlignment.ll | 4 +- .../CodeGen/ARM/2011-12-14-machine-sink.ll | 6 +- .../CodeGen/ARM/2011-12-19-sjlj-clobber.ll | 4 +- .../CodeGen/ARM/2012-01-23-PostRA-LICM.ll | 14 +- .../ARM/2012-01-24-RegSequenceLiveRange.ll | 2 +- .../CodeGen/ARM/2012-01-26-CopyPropKills.ll | 8 +- .../CodeGen/ARM/2012-02-01-CoalescerBug.ll | 2 +- .../CodeGen/ARM/2012-03-13-DAGCombineBug.ll | 2 +- .../CodeGen/ARM/2012-06-12-SchedMemLatency.ll | 4 +- .../ARM/2012-08-04-DtripleSpillReload.ll | 2 +- .../ARM/2012-08-08-legalize-unaligned.ll | 2 +- 
.../CodeGen/ARM/2012-08-09-neon-extload.ll | 12 +- .../CodeGen/ARM/2012-08-23-legalize-vmull.ll | 30 +- .../ARM/2012-10-04-AAPCS-byval-align8.ll | 2 +- .../ARM/2012-10-04-FixedFrame-vs-byval.ll | 2 +- llvm/test/CodeGen/ARM/2013-01-21-PR14992.ll | 6 +- .../ARM/2013-04-18-load-overlap-PR14824.ll | 10 +- .../ARM/2013-05-07-ByteLoadSameAddress.ll | 18 +- .../ARM/2013-05-31-char-shift-crash.ll | 2 +- .../ARM/2013-07-29-vector-or-combine.ll | 2 +- .../2014-01-09-pseudo_expand_implicit_reg.ll | 6 +- .../ARM/2015-01-21-thumbv4t-ldstr-opt.ll | 16 +- .../CodeGen/ARM/MergeConsecutiveStores.ll | 18 +- .../ARM/Windows/chkstk-movw-movt-isel.ll | 4 +- llvm/test/CodeGen/ARM/Windows/dllimport.ll | 4 +- .../CodeGen/ARM/Windows/frame-register.ll | 6 +- .../ARM/Windows/movw-movt-relocations.ll | 4 +- llvm/test/CodeGen/ARM/Windows/pic.ll | 2 +- .../ARM/Windows/stack-probe-non-default.ll | 2 +- llvm/test/CodeGen/ARM/Windows/vla.ll | 2 +- llvm/test/CodeGen/ARM/a15-partial-update.ll | 4 +- llvm/test/CodeGen/ARM/addrmode.ll | 4 +- llvm/test/CodeGen/ARM/aliases.ll | 6 +- .../CodeGen/ARM/alloc-no-stack-realign.ll | 8 +- llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll | 6 +- llvm/test/CodeGen/ARM/arm-modifier.ll | 8 +- llvm/test/CodeGen/ARM/atomic-64bit.ll | 2 +- llvm/test/CodeGen/ARM/atomic-load-store.ll | 8 +- llvm/test/CodeGen/ARM/atomic-op.ll | 8 +- llvm/test/CodeGen/ARM/atomic-ops-v8.ll | 16 +- llvm/test/CodeGen/ARM/available_externally.ll | 2 +- llvm/test/CodeGen/ARM/avoid-cpsr-rmw.ll | 18 +- llvm/test/CodeGen/ARM/bfi.ll | 2 +- llvm/test/CodeGen/ARM/bfx.ll | 6 +- .../CodeGen/ARM/big-endian-neon-bitconv.ll | 96 +-- .../CodeGen/ARM/big-endian-neon-extend.ll | 14 +- .../ARM/big-endian-neon-trunc-store.ll | 4 +- llvm/test/CodeGen/ARM/big-endian-ret-f64.ll | 2 +- .../CodeGen/ARM/big-endian-vector-caller.ll | 168 ++-- llvm/test/CodeGen/ARM/bswap16.ll | 4 +- llvm/test/CodeGen/ARM/call-tc.ll | 6 +- llvm/test/CodeGen/ARM/call.ll | 6 +- llvm/test/CodeGen/ARM/call_nolink.ll | 20 +- llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll | 6 +- llvm/test/CodeGen/ARM/coalesce-subregs.ll | 22 +- llvm/test/CodeGen/ARM/code-placement.ll | 4 +- llvm/test/CodeGen/ARM/commute-movcc.ll | 2 +- llvm/test/CodeGen/ARM/compare-call.ll | 4 +- llvm/test/CodeGen/ARM/copy-paired-reg.ll | 2 +- llvm/test/CodeGen/ARM/crash-greedy-v6.ll | 2 +- llvm/test/CodeGen/ARM/crash.ll | 2 +- llvm/test/CodeGen/ARM/cse-ldrlit.ll | 2 +- llvm/test/CodeGen/ARM/cse-libcalls.ll | 2 +- .../CodeGen/ARM/dagcombine-anyexttozeroext.ll | 4 +- .../CodeGen/ARM/debug-frame-large-stack.ll | 2 +- llvm/test/CodeGen/ARM/debug-frame-vararg.ll | 4 +- llvm/test/CodeGen/ARM/debug-info-blocks.ll | 26 +- llvm/test/CodeGen/ARM/divmod.ll | 4 +- llvm/test/CodeGen/ARM/dwarf-eh.ll | 8 +- llvm/test/CodeGen/ARM/dyn-stackalloc.ll | 4 +- llvm/test/CodeGen/ARM/emit-big-cst.ll | 2 +- llvm/test/CodeGen/ARM/extload-knownzero.ll | 2 +- llvm/test/CodeGen/ARM/extloadi1.ll | 2 +- .../CodeGen/ARM/fast-isel-GEP-coalesce.ll | 8 +- llvm/test/CodeGen/ARM/fast-isel-align.ll | 10 +- llvm/test/CodeGen/ARM/fast-isel-call.ll | 2 +- llvm/test/CodeGen/ARM/fast-isel-fold.ll | 12 +- .../test/CodeGen/ARM/fast-isel-ldr-str-arm.ll | 12 +- .../ARM/fast-isel-ldr-str-thumb-neg-index.ll | 18 +- .../CodeGen/ARM/fast-isel-ldrh-strh-arm.ll | 22 +- .../ARM/fast-isel-load-store-verify.ll | 6 +- llvm/test/CodeGen/ARM/fast-isel-pic.ll | 4 +- llvm/test/CodeGen/ARM/fast-isel-pred.ll | 20 +- .../CodeGen/ARM/fast-isel-redefinition.ll | 2 +- llvm/test/CodeGen/ARM/fast-isel-static.ll | 10 +- llvm/test/CodeGen/ARM/fast-isel-vararg.ll | 12 +- 
llvm/test/CodeGen/ARM/fast-isel.ll | 12 +- .../ARM/fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/ARM/flag-crash.ll | 6 +- llvm/test/CodeGen/ARM/fnegs.ll | 4 +- llvm/test/CodeGen/ARM/fold-stack-adjust.ll | 6 +- llvm/test/CodeGen/ARM/fp.ll | 2 +- llvm/test/CodeGen/ARM/fp16.ll | 4 +- llvm/test/CodeGen/ARM/fpcmp-opt.ll | 8 +- llvm/test/CodeGen/ARM/fpmem.ll | 6 +- llvm/test/CodeGen/ARM/fptoint.ll | 4 +- llvm/test/CodeGen/ARM/frame-register.ll | 6 +- llvm/test/CodeGen/ARM/fusedMAC.ll | 2 +- llvm/test/CodeGen/ARM/ghc-tcreturn-lowered.ll | 2 +- llvm/test/CodeGen/ARM/global-merge-1.ll | 8 +- llvm/test/CodeGen/ARM/globals.ll | 2 +- llvm/test/CodeGen/ARM/gv-stubs-crash.ll | 2 +- llvm/test/CodeGen/ARM/half.ll | 8 +- llvm/test/CodeGen/ARM/hidden-vis-2.ll | 2 +- llvm/test/CodeGen/ARM/hidden-vis-3.ll | 4 +- llvm/test/CodeGen/ARM/ifconv-kills.ll | 4 +- llvm/test/CodeGen/ARM/ifconv-regmask.ll | 2 +- llvm/test/CodeGen/ARM/ifcvt-branch-weight.ll | 4 +- llvm/test/CodeGen/ARM/ifcvt11.ll | 6 +- llvm/test/CodeGen/ARM/ifcvt5.ll | 2 +- llvm/test/CodeGen/ARM/ifcvt7.ll | 6 +- .../CodeGen/ARM/illegal-vector-bitcast.ll | 4 +- llvm/test/CodeGen/ARM/indirectbr-2.ll | 6 +- llvm/test/CodeGen/ARM/indirectbr.ll | 4 +- llvm/test/CodeGen/ARM/inline-diagnostics.ll | 2 +- llvm/test/CodeGen/ARM/interrupt-attr.ll | 8 +- llvm/test/CodeGen/ARM/intrinsics-crypto.ll | 10 +- .../CodeGen/ARM/invoke-donothing-assert.ll | 2 +- llvm/test/CodeGen/ARM/isel-v8i32-crash.ll | 2 +- .../CodeGen/ARM/krait-cpu-div-attribute.ll | 4 +- llvm/test/CodeGen/ARM/large-stack.ll | 2 +- llvm/test/CodeGen/ARM/ldm.ll | 16 +- llvm/test/CodeGen/ARM/ldr.ll | 14 +- llvm/test/CodeGen/ARM/ldr_ext.ll | 10 +- llvm/test/CodeGen/ARM/ldr_frame.ll | 8 +- llvm/test/CodeGen/ARM/ldr_post.ll | 4 +- llvm/test/CodeGen/ARM/ldr_pre.ll | 4 +- llvm/test/CodeGen/ARM/ldrd-memoper.ll | 4 +- llvm/test/CodeGen/ARM/ldrd.ll | 12 +- llvm/test/CodeGen/ARM/ldst-f32-2-i32.ll | 2 +- llvm/test/CodeGen/ARM/ldstrex-m.ll | 6 +- llvm/test/CodeGen/ARM/ldstrex.ll | 6 +- llvm/test/CodeGen/ARM/load-global.ll | 2 +- llvm/test/CodeGen/ARM/load.ll | 8 +- llvm/test/CodeGen/ARM/load_i1_select.ll | 2 +- llvm/test/CodeGen/ARM/long.ll | 2 +- llvm/test/CodeGen/ARM/lsr-code-insertion.ll | 12 +- llvm/test/CodeGen/ARM/lsr-icmp-imm.ll | 2 +- llvm/test/CodeGen/ARM/lsr-unfolded-offset.ll | 8 +- llvm/test/CodeGen/ARM/machine-cse-cmp.ll | 4 +- llvm/test/CodeGen/ARM/machine-licm.ll | 4 +- llvm/test/CodeGen/ARM/minsize-litpools.ll | 4 +- llvm/test/CodeGen/ARM/misched-copy-arm.ll | 4 +- llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll | 38 +- llvm/test/CodeGen/ARM/negative-offset.ll | 4 +- llvm/test/CodeGen/ARM/neon_cmp.ll | 4 +- llvm/test/CodeGen/ARM/neon_div.ll | 16 +- llvm/test/CodeGen/ARM/neon_fpconv.ll | 4 +- llvm/test/CodeGen/ARM/neon_ld1.ll | 8 +- llvm/test/CodeGen/ARM/neon_ld2.ll | 12 +- llvm/test/CodeGen/ARM/neon_spill.ll | 2 +- llvm/test/CodeGen/ARM/no-fpu.ll | 2 +- llvm/test/CodeGen/ARM/no-tail-call.ll | 4 +- llvm/test/CodeGen/ARM/none-macho.ll | 6 +- llvm/test/CodeGen/ARM/nop_concat_vectors.ll | 2 +- llvm/test/CodeGen/ARM/optselect-regclass.ll | 2 +- llvm/test/CodeGen/ARM/phi.ll | 2 +- llvm/test/CodeGen/ARM/popcnt.ll | 36 +- llvm/test/CodeGen/ARM/pr13249.ll | 4 +- llvm/test/CodeGen/ARM/pr18364-movw.ll | 4 +- llvm/test/CodeGen/ARM/pr3502.ll | 2 +- llvm/test/CodeGen/ARM/private.ll | 2 +- llvm/test/CodeGen/ARM/reg_sequence.ll | 12 +- llvm/test/CodeGen/ARM/saxpy10-a9.ll | 40 +- llvm/test/CodeGen/ARM/segmented-stacks.ll | 2 +- llvm/test/CodeGen/ARM/select_xform.ll | 2 +- 
llvm/test/CodeGen/ARM/shifter_operand.ll | 6 +- llvm/test/CodeGen/ARM/smul.ll | 2 +- llvm/test/CodeGen/ARM/space-directive.ll | 2 +- llvm/test/CodeGen/ARM/spill-q.ll | 2 +- llvm/test/CodeGen/ARM/ssp-data-layout.ll | 42 +- llvm/test/CodeGen/ARM/stack-alignment.ll | 60 +- llvm/test/CodeGen/ARM/str_post.ll | 4 +- llvm/test/CodeGen/ARM/str_pre-2.ll | 4 +- llvm/test/CodeGen/ARM/str_pre.ll | 4 +- .../CodeGen/ARM/struct-byval-frame-index.ll | 36 +- llvm/test/CodeGen/ARM/sub-cmp-peephole.ll | 4 +- llvm/test/CodeGen/ARM/swift-atomics.ll | 4 +- llvm/test/CodeGen/ARM/swift-vldm.ll | 10 +- llvm/test/CodeGen/ARM/tail-dup.ll | 8 +- llvm/test/CodeGen/ARM/test-sharedidx.ll | 16 +- llvm/test/CodeGen/ARM/thumb1-varalloc.ll | 2 +- .../CodeGen/ARM/thumb1_return_sequence.ll | 24 +- llvm/test/CodeGen/ARM/thumb_indirect_calls.ll | 2 +- llvm/test/CodeGen/ARM/tls1.ll | 2 +- llvm/test/CodeGen/ARM/tls2.ll | 2 +- llvm/test/CodeGen/ARM/tls3.ll | 2 +- llvm/test/CodeGen/ARM/trunc_ldr.ll | 4 +- .../CodeGen/ARM/truncstore-dag-combine.ll | 4 +- llvm/test/CodeGen/ARM/twoaddrinstr.ll | 2 +- llvm/test/CodeGen/ARM/uint64tof64.ll | 2 +- llvm/test/CodeGen/ARM/umulo-32.ll | 2 +- llvm/test/CodeGen/ARM/unaligned_load_store.ll | 8 +- .../ARM/unaligned_load_store_vector.ll | 54 +- llvm/test/CodeGen/ARM/undef-sext.ll | 2 +- llvm/test/CodeGen/ARM/vaba.ll | 108 +-- llvm/test/CodeGen/ARM/vabd.ll | 80 +- llvm/test/CodeGen/ARM/vabs.ll | 28 +- llvm/test/CodeGen/ARM/vadd.ll | 100 +-- llvm/test/CodeGen/ARM/vargs_align.ll | 4 +- llvm/test/CodeGen/ARM/vbits.ll | 208 ++--- llvm/test/CodeGen/ARM/vbsl-constant.ll | 48 +- llvm/test/CodeGen/ARM/vbsl.ll | 48 +- llvm/test/CodeGen/ARM/vceq.ll | 34 +- llvm/test/CodeGen/ARM/vcge.ll | 68 +- llvm/test/CodeGen/ARM/vcgt.ll | 72 +- llvm/test/CodeGen/ARM/vcnt.ll | 28 +- llvm/test/CodeGen/ARM/vcombine.ll | 24 +- llvm/test/CodeGen/ARM/vcvt-cost.ll | 20 +- llvm/test/CodeGen/ARM/vcvt-v8.ll | 32 +- llvm/test/CodeGen/ARM/vcvt.ll | 36 +- llvm/test/CodeGen/ARM/vcvt_combine.ll | 12 +- llvm/test/CodeGen/ARM/vdiv_combine.ll | 12 +- llvm/test/CodeGen/ARM/vdup.ll | 16 +- llvm/test/CodeGen/ARM/vector-DAGCombine.ll | 32 +- llvm/test/CodeGen/ARM/vector-extend-narrow.ll | 8 +- llvm/test/CodeGen/ARM/vector-load.ll | 104 +-- llvm/test/CodeGen/ARM/vector-promotion.ll | 76 +- llvm/test/CodeGen/ARM/vector-spilling.ll | 8 +- llvm/test/CodeGen/ARM/vector-store.ll | 52 +- llvm/test/CodeGen/ARM/vext.ll | 50 +- llvm/test/CodeGen/ARM/vfcmp.ll | 44 +- llvm/test/CodeGen/ARM/vfp.ll | 36 +- llvm/test/CodeGen/ARM/vget_lane.ll | 44 +- llvm/test/CodeGen/ARM/vhadd.ll | 96 +-- llvm/test/CodeGen/ARM/vhsub.ll | 48 +- llvm/test/CodeGen/ARM/vicmp.ll | 40 +- llvm/test/CodeGen/ARM/vld1.ll | 6 +- llvm/test/CodeGen/ARM/vld2.ll | 4 +- llvm/test/CodeGen/ARM/vld3.ll | 4 +- llvm/test/CodeGen/ARM/vld4.ll | 4 +- llvm/test/CodeGen/ARM/vlddup.ll | 18 +- llvm/test/CodeGen/ARM/vldlane.ll | 90 +- llvm/test/CodeGen/ARM/vldm-liveness.ll | 8 +- llvm/test/CodeGen/ARM/vldm-sched-a9.ll | 18 +- llvm/test/CodeGen/ARM/vminmax.ll | 112 +-- llvm/test/CodeGen/ARM/vminmaxnm.ll | 16 +- llvm/test/CodeGen/ARM/vmla.ll | 84 +- llvm/test/CodeGen/ARM/vmls.ll | 84 +- llvm/test/CodeGen/ARM/vmov.ll | 40 +- llvm/test/CodeGen/ARM/vmul.ll | 102 +-- llvm/test/CodeGen/ARM/vneg.ll | 28 +- llvm/test/CodeGen/ARM/vpadal.ll | 48 +- llvm/test/CodeGen/ARM/vpadd.ll | 44 +- llvm/test/CodeGen/ARM/vpminmax.ll | 56 +- llvm/test/CodeGen/ARM/vqadd.ll | 64 +- llvm/test/CodeGen/ARM/vqdmul.ll | 64 +- llvm/test/CodeGen/ARM/vqshl.ll | 176 ++-- llvm/test/CodeGen/ARM/vqshrn.ll | 36 +- 
llvm/test/CodeGen/ARM/vqsub.ll | 64 +- llvm/test/CodeGen/ARM/vrec.ll | 32 +- llvm/test/CodeGen/ARM/vrev.ll | 38 +- llvm/test/CodeGen/ARM/vselect_imax.ll | 24 +- llvm/test/CodeGen/ARM/vshift.ll | 144 ++-- llvm/test/CodeGen/ARM/vshiftins.ll | 64 +- llvm/test/CodeGen/ARM/vshl.ll | 208 ++--- llvm/test/CodeGen/ARM/vshll.ll | 24 +- llvm/test/CodeGen/ARM/vshrn.ll | 18 +- llvm/test/CodeGen/ARM/vsra.ll | 128 +-- llvm/test/CodeGen/ARM/vst1.ll | 30 +- llvm/test/CodeGen/ARM/vst2.ll | 30 +- llvm/test/CodeGen/ARM/vst3.ll | 30 +- llvm/test/CodeGen/ARM/vst4.ll | 30 +- llvm/test/CodeGen/ARM/vstlane.ll | 78 +- llvm/test/CodeGen/ARM/vsub.ll | 100 +-- llvm/test/CodeGen/ARM/vtbl.ll | 40 +- llvm/test/CodeGen/ARM/vtrn.ll | 40 +- llvm/test/CodeGen/ARM/vuzp.ll | 32 +- llvm/test/CodeGen/ARM/vzip.ll | 32 +- .../test/CodeGen/ARM/zextload_demandedbits.ll | 2 +- llvm/test/CodeGen/BPF/basictest.ll | 2 +- llvm/test/CodeGen/BPF/ex1.ll | 2 +- llvm/test/CodeGen/BPF/intrinsics.ll | 4 +- llvm/test/CodeGen/BPF/load.ll | 10 +- llvm/test/CodeGen/BPF/loops.ll | 10 +- llvm/test/CodeGen/BPF/struct_ret1.ll | 4 +- .../CodeGen/CPP/2009-05-01-Long-Double.ll | 2 +- llvm/test/CodeGen/CPP/2009-05-04-CondBr.ll | 10 +- .../CodeGen/Generic/2003-05-28-ManyArgs.ll | 96 +-- .../CodeGen/Generic/2003-05-30-BadFoldGEP.ll | 2 +- .../Generic/2003-07-29-BadConstSbyte.ll | 2 +- .../2004-05-09-LiveVarPartialRegister.ll | 4 +- .../Generic/2006-02-12-InsertLibcall.ll | 2 +- .../Generic/2006-03-01-dagcombineinfloop.ll | 22 +- .../CodeGen/Generic/2006-04-26-SetCCAnd.ll | 4 +- .../2006-06-13-ComputeMaskedBitsCrash.ll | 4 +- .../Generic/2006-06-28-SimplifySetCCCrash.ll | 8 +- .../Generic/2006-09-02-LocalAllocCrash.ll | 14 +- .../Generic/2006-11-20-DAGCombineCrash.ll | 4 +- .../Generic/2007-01-15-LoadSelectCycle.ll | 4 +- .../Generic/2008-01-25-dag-combine-mul.ll | 12 +- .../CodeGen/Generic/2008-01-30-LoadCrash.ll | 2 +- .../CodeGen/Generic/2008-02-25-NegateZero.ll | 4 +- .../2009-03-29-SoftFloatVectorExtract.ll | 2 +- .../Generic/2009-04-28-i128-cmp-crash.ll | 4 +- .../Generic/2011-07-07-ScheduleDAGCrash.ll | 4 +- .../CodeGen/Generic/2012-06-08-APIntCrash.ll | 2 +- .../Generic/2014-02-05-OpaqueConstants.ll | 2 +- llvm/test/CodeGen/Generic/APIntLoadStore.ll | 512 ++++++------ llvm/test/CodeGen/Generic/badFoldGEP.ll | 2 +- llvm/test/CodeGen/Generic/builtin-expect.ll | 34 +- llvm/test/CodeGen/Generic/cast-fp.ll | 4 +- llvm/test/CodeGen/Generic/constindices.ll | 8 +- llvm/test/CodeGen/Generic/crash.ll | 8 +- .../test/CodeGen/Generic/dag-combine-crash.ll | 2 +- llvm/test/CodeGen/Generic/empty-load-store.ll | 4 +- llvm/test/CodeGen/Generic/empty-phi.ll | 2 +- .../test/CodeGen/Generic/fp-to-int-invalid.ll | 4 +- llvm/test/CodeGen/Generic/fwdtwice.ll | 2 +- llvm/test/CodeGen/Generic/global-ret0.ll | 2 +- .../CodeGen/Generic/inline-asm-mem-clobber.ll | 6 +- llvm/test/CodeGen/Generic/pr2625.ll | 4 +- llvm/test/CodeGen/Generic/print-arith-fp.ll | 4 +- llvm/test/CodeGen/Generic/print-arith-int.ll | 4 +- llvm/test/CodeGen/Generic/print-mul-exp.ll | 2 +- llvm/test/CodeGen/Generic/print-mul.ll | 4 +- llvm/test/CodeGen/Generic/print-shift.ll | 4 +- llvm/test/CodeGen/Generic/select.ll | 6 +- llvm/test/CodeGen/Generic/undef-phi.ll | 4 +- llvm/test/CodeGen/Generic/v-split.ll | 4 +- llvm/test/CodeGen/Generic/vector-casts.ll | 14 +- .../Generic/vector-identity-shuffle.ll | 2 +- llvm/test/CodeGen/Generic/vector.ll | 42 +- llvm/test/CodeGen/Hexagon/BranchPredict.ll | 2 +- llvm/test/CodeGen/Hexagon/absaddr-store.ll | 6 +- llvm/test/CodeGen/Hexagon/absimm.ll | 2 +- 
llvm/test/CodeGen/Hexagon/always-ext.ll | 4 +- llvm/test/CodeGen/Hexagon/block-addr.ll | 14 +- llvm/test/CodeGen/Hexagon/cext-check.ll | 10 +- .../CodeGen/Hexagon/cext-valid-packet2.ll | 10 +- llvm/test/CodeGen/Hexagon/cmp_pred2.ll | 8 +- llvm/test/CodeGen/Hexagon/cmpb_pred.ll | 6 +- llvm/test/CodeGen/Hexagon/combine.ll | 4 +- llvm/test/CodeGen/Hexagon/combine_ir.ll | 10 +- llvm/test/CodeGen/Hexagon/convertdptoint.ll | 8 +- llvm/test/CodeGen/Hexagon/convertdptoll.ll | 8 +- llvm/test/CodeGen/Hexagon/convertsptoint.ll | 8 +- llvm/test/CodeGen/Hexagon/convertsptoll.ll | 8 +- llvm/test/CodeGen/Hexagon/dadd.ll | 4 +- llvm/test/CodeGen/Hexagon/dmul.ll | 4 +- llvm/test/CodeGen/Hexagon/double.ll | 10 +- .../Hexagon/doubleconvert-ieee-rnd-near.ll | 8 +- llvm/test/CodeGen/Hexagon/dsub.ll | 4 +- llvm/test/CodeGen/Hexagon/extload-combine.ll | 12 +- llvm/test/CodeGen/Hexagon/fadd.ll | 4 +- llvm/test/CodeGen/Hexagon/fcmp.ll | 6 +- llvm/test/CodeGen/Hexagon/float.ll | 10 +- .../Hexagon/floatconvert-ieee-rnd-near.ll | 10 +- llvm/test/CodeGen/Hexagon/fmul.ll | 4 +- llvm/test/CodeGen/Hexagon/frame.ll | 10 +- llvm/test/CodeGen/Hexagon/fsub.ll | 4 +- llvm/test/CodeGen/Hexagon/fusedandshift.ll | 2 +- .../CodeGen/Hexagon/gp-plus-offset-load.ll | 6 +- llvm/test/CodeGen/Hexagon/gp-rel.ll | 6 +- llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll | 4 +- llvm/test/CodeGen/Hexagon/hwloop-dbg.ll | 2 +- llvm/test/CodeGen/Hexagon/hwloop-le.ll | 30 +- llvm/test/CodeGen/Hexagon/hwloop-lt.ll | 30 +- llvm/test/CodeGen/Hexagon/hwloop-ne.ll | 30 +- llvm/test/CodeGen/Hexagon/i16_VarArg.ll | 4 +- llvm/test/CodeGen/Hexagon/i1_VarArg.ll | 4 +- llvm/test/CodeGen/Hexagon/i8_VarArg.ll | 4 +- .../Hexagon/idxload-with-zero-offset.ll | 12 +- llvm/test/CodeGen/Hexagon/macint.ll | 2 +- llvm/test/CodeGen/Hexagon/memops.ll | 252 +++--- llvm/test/CodeGen/Hexagon/memops1.ll | 10 +- llvm/test/CodeGen/Hexagon/memops2.ll | 4 +- llvm/test/CodeGen/Hexagon/memops3.ll | 4 +- .../test/CodeGen/Hexagon/misaligned-access.ll | 4 +- llvm/test/CodeGen/Hexagon/mpy.ll | 6 +- llvm/test/CodeGen/Hexagon/newvaluejump.ll | 4 +- llvm/test/CodeGen/Hexagon/newvaluejump2.ll | 4 +- llvm/test/CodeGen/Hexagon/newvaluestore.ll | 6 +- llvm/test/CodeGen/Hexagon/opt-fabs.ll | 2 +- llvm/test/CodeGen/Hexagon/opt-fneg.ll | 2 +- llvm/test/CodeGen/Hexagon/postinc-load.ll | 4 +- llvm/test/CodeGen/Hexagon/postinc-store.ll | 4 +- llvm/test/CodeGen/Hexagon/pred-gp.ll | 4 +- llvm/test/CodeGen/Hexagon/pred-instrs.ll | 2 +- llvm/test/CodeGen/Hexagon/remove_lsr.ll | 6 +- llvm/test/CodeGen/Hexagon/static.ll | 6 +- llvm/test/CodeGen/Hexagon/struct_args.ll | 2 +- llvm/test/CodeGen/Hexagon/tfr-to-combine.ll | 2 +- llvm/test/CodeGen/Hexagon/union-1.ll | 4 +- llvm/test/CodeGen/Hexagon/vaddh.ll | 4 +- llvm/test/CodeGen/Hexagon/validate-offset.ll | 14 +- llvm/test/CodeGen/Hexagon/zextloadi1.ll | 4 +- .../CodeGen/MSP430/2009-05-10-CyclicDAG.ll | 2 +- llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll | 6 +- llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll | 4 +- .../MSP430/2009-08-25-DynamicStackAlloc.ll | 4 +- .../CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll | 8 +- .../CodeGen/MSP430/2009-10-10-OrImpDef.ll | 2 +- .../CodeGen/MSP430/2009-11-08-InvalidResNo.ll | 4 +- .../CodeGen/MSP430/2010-05-01-CombinerAnd.ll | 2 +- llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll | 14 +- llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll | 14 +- llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll | 14 +- llvm/test/CodeGen/MSP430/Inst16mi.ll | 8 +- llvm/test/CodeGen/MSP430/Inst16mm.ll | 22 +- llvm/test/CodeGen/MSP430/Inst16mr.ll | 10 +- 
llvm/test/CodeGen/MSP430/Inst16rm.ll | 10 +- llvm/test/CodeGen/MSP430/Inst8mi.ll | 8 +- llvm/test/CodeGen/MSP430/Inst8mm.ll | 18 +- llvm/test/CodeGen/MSP430/Inst8mr.ll | 10 +- llvm/test/CodeGen/MSP430/Inst8rm.ll | 10 +- llvm/test/CodeGen/MSP430/bit.ll | 24 +- llvm/test/CodeGen/MSP430/byval.ll | 2 +- llvm/test/CodeGen/MSP430/indirectbr.ll | 4 +- llvm/test/CodeGen/MSP430/indirectbr2.ll | 2 +- llvm/test/CodeGen/MSP430/inline-asm.ll | 2 +- llvm/test/CodeGen/MSP430/jumptable.ll | 4 +- llvm/test/CodeGen/MSP430/memset.ll | 2 +- llvm/test/CodeGen/MSP430/misched-msp430.ll | 2 +- .../CodeGen/MSP430/mult-alt-generic-msp430.ll | 38 +- llvm/test/CodeGen/MSP430/postinc.ll | 10 +- .../CodeGen/Mips/2008-07-15-SmallSection.ll | 4 +- .../test/CodeGen/Mips/2008-08-01-AsmInline.ll | 10 +- .../CodeGen/Mips/2008-08-03-ReturnDouble.ll | 4 +- .../CodeGen/Mips/2008-10-13-LegalizerBug.ll | 2 +- .../CodeGen/Mips/2008-11-10-xint_to_fp.ll | 14 +- llvm/test/CodeGen/Mips/2010-07-20-Switch.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/br1.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll | 8 +- llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll | 48 +- llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll | 4 +- llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll | 40 +- .../test/CodeGen/Mips/Fast-ISel/loadstore2.ll | 10 +- .../CodeGen/Mips/Fast-ISel/loadstoreconv.ll | 30 +- llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll | 6 +- llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll | 10 +- llvm/test/CodeGen/Mips/Fast-ISel/shift.ll | 2 +- llvm/test/CodeGen/Mips/addi.ll | 8 +- llvm/test/CodeGen/Mips/addressing-mode.ll | 4 +- llvm/test/CodeGen/Mips/align16.ll | 4 +- llvm/test/CodeGen/Mips/alloca.ll | 14 +- llvm/test/CodeGen/Mips/alloca16.ll | 44 +- llvm/test/CodeGen/Mips/and1.ll | 4 +- llvm/test/CodeGen/Mips/atomic.ll | 4 +- llvm/test/CodeGen/Mips/atomicops.ll | 6 +- llvm/test/CodeGen/Mips/beqzc.ll | 2 +- llvm/test/CodeGen/Mips/beqzc1.ll | 2 +- llvm/test/CodeGen/Mips/biggot.ll | 2 +- llvm/test/CodeGen/Mips/brconeq.ll | 4 +- llvm/test/CodeGen/Mips/brconeqk.ll | 2 +- llvm/test/CodeGen/Mips/brconeqz.ll | 2 +- llvm/test/CodeGen/Mips/brconge.ll | 6 +- llvm/test/CodeGen/Mips/brcongt.ll | 4 +- llvm/test/CodeGen/Mips/brconle.ll | 6 +- llvm/test/CodeGen/Mips/brconlt.ll | 4 +- llvm/test/CodeGen/Mips/brconne.ll | 4 +- llvm/test/CodeGen/Mips/brconnek.ll | 2 +- llvm/test/CodeGen/Mips/brconnez.ll | 2 +- llvm/test/CodeGen/Mips/brdelayslot.ll | 12 +- llvm/test/CodeGen/Mips/brind.ll | 2 +- .../arguments-varargs-small-structs-byte.ll | 38 +- ...ents-varargs-small-structs-combinations.ll | 20 +- ...nts-varargs-small-structs-multiple-args.ll | 36 +- llvm/test/CodeGen/Mips/cconv/return-float.ll | 4 +- .../CodeGen/Mips/cconv/return-hard-float.ll | 6 +- .../CodeGen/Mips/cconv/return-hard-fp128.ll | 2 +- .../Mips/cconv/return-hard-struct-f128.ll | 2 +- llvm/test/CodeGen/Mips/cconv/return-struct.ll | 8 +- llvm/test/CodeGen/Mips/cconv/return.ll | 6 +- llvm/test/CodeGen/Mips/cfi_offset.ll | 4 +- llvm/test/CodeGen/Mips/ci2.ll | 2 +- llvm/test/CodeGen/Mips/cmov.ll | 6 +- llvm/test/CodeGen/Mips/cmplarge.ll | 4 +- llvm/test/CodeGen/Mips/const4a.ll | 2 +- llvm/test/CodeGen/Mips/ctlz.ll | 2 +- llvm/test/CodeGen/Mips/disable-tail-merge.ll | 6 +- llvm/test/CodeGen/Mips/div.ll | 4 +- llvm/test/CodeGen/Mips/div_rem.ll | 4 +- llvm/test/CodeGen/Mips/divrem.ll | 4 +- llvm/test/CodeGen/Mips/divu.ll | 4 +- llvm/test/CodeGen/Mips/divu_remu.ll | 4 +- llvm/test/CodeGen/Mips/dsp-patterns.ll | 6 +- 
llvm/test/CodeGen/Mips/dsp-vec-load-store.ll | 2 +- llvm/test/CodeGen/Mips/eh.ll | 2 +- llvm/test/CodeGen/Mips/emit-big-cst.ll | 2 +- llvm/test/CodeGen/Mips/ex2.ll | 2 +- llvm/test/CodeGen/Mips/extins.ll | 2 +- llvm/test/CodeGen/Mips/f16abs.ll | 4 +- llvm/test/CodeGen/Mips/fastcc.ll | 120 +-- llvm/test/CodeGen/Mips/fixdfsf.ll | 2 +- llvm/test/CodeGen/Mips/fp-indexed-ls.ll | 18 +- llvm/test/CodeGen/Mips/fp-spill-reload.ll | 16 +- llvm/test/CodeGen/Mips/fp16instrinsmc.ll | 60 +- llvm/test/CodeGen/Mips/fp16static.ll | 4 +- llvm/test/CodeGen/Mips/fpneeded.ll | 6 +- llvm/test/CodeGen/Mips/fpnotneeded.ll | 2 +- llvm/test/CodeGen/Mips/global-address.ll | 4 +- llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll | 2 +- llvm/test/CodeGen/Mips/gprestore.ll | 6 +- llvm/test/CodeGen/Mips/hf16_1.ll | 80 +- llvm/test/CodeGen/Mips/hf16call32.ll | 530 ++++++------ llvm/test/CodeGen/Mips/hf16call32_body.ll | 54 +- llvm/test/CodeGen/Mips/hf1_body.ll | 2 +- llvm/test/CodeGen/Mips/hfptrcall.ll | 32 +- .../Mips/inlineasm-assembler-directives.ll | 2 +- .../CodeGen/Mips/inlineasm-operand-code.ll | 6 +- llvm/test/CodeGen/Mips/inlineasm64.ll | 4 +- llvm/test/CodeGen/Mips/internalfunc.ll | 4 +- llvm/test/CodeGen/Mips/jtstat.ll | 2 +- llvm/test/CodeGen/Mips/l3mc.ll | 32 +- llvm/test/CodeGen/Mips/lb1.ll | 4 +- llvm/test/CodeGen/Mips/lbu1.ll | 4 +- llvm/test/CodeGen/Mips/lcb2.ll | 16 +- llvm/test/CodeGen/Mips/lcb3c.ll | 4 +- llvm/test/CodeGen/Mips/lcb4a.ll | 4 +- llvm/test/CodeGen/Mips/lcb5.ll | 32 +- llvm/test/CodeGen/Mips/lh1.ll | 4 +- llvm/test/CodeGen/Mips/lhu1.ll | 4 +- llvm/test/CodeGen/Mips/llcarry.ll | 10 +- .../CodeGen/Mips/load-store-left-right.ll | 14 +- llvm/test/CodeGen/Mips/machineverifier.ll | 2 +- llvm/test/CodeGen/Mips/mbrsize4a.ll | 2 +- llvm/test/CodeGen/Mips/micromips-addiu.ll | 6 +- llvm/test/CodeGen/Mips/micromips-and16.ll | 4 +- llvm/test/CodeGen/Mips/micromips-andi.ll | 4 +- .../Mips/micromips-compact-branches.ll | 2 +- .../CodeGen/Mips/micromips-delay-slot-jr.ll | 2 +- .../test/CodeGen/Mips/micromips-delay-slot.ll | 2 +- llvm/test/CodeGen/Mips/micromips-gp-rc.ll | 2 +- llvm/test/CodeGen/Mips/micromips-jal.ll | 10 +- .../Mips/micromips-load-effective-address.ll | 8 +- llvm/test/CodeGen/Mips/micromips-or16.ll | 4 +- .../Mips/micromips-rdhwr-directives.ll | 2 +- llvm/test/CodeGen/Mips/micromips-shift.ll | 8 +- llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll | 12 +- llvm/test/CodeGen/Mips/micromips-xor16.ll | 4 +- llvm/test/CodeGen/Mips/mips16_32_8.ll | 10 +- llvm/test/CodeGen/Mips/mips16_fpret.ll | 16 +- llvm/test/CodeGen/Mips/mips16ex.ll | 12 +- llvm/test/CodeGen/Mips/mips16fpe.ll | 112 +-- llvm/test/CodeGen/Mips/mips64-f128-call.ll | 4 +- llvm/test/CodeGen/Mips/mips64-f128.ll | 72 +- llvm/test/CodeGen/Mips/mips64directive.ll | 2 +- llvm/test/CodeGen/Mips/mips64fpldst.ll | 8 +- llvm/test/CodeGen/Mips/mips64instrs.ll | 8 +- llvm/test/CodeGen/Mips/mips64intldst.ll | 22 +- llvm/test/CodeGen/Mips/mips64sinttofpsf.ll | 2 +- llvm/test/CodeGen/Mips/mipslopat.ll | 4 +- llvm/test/CodeGen/Mips/misha.ll | 8 +- llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll | 4 +- llvm/test/CodeGen/Mips/msa/2r.ll | 24 +- .../test/CodeGen/Mips/msa/2r_vector_scalar.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf.ll | 32 +- llvm/test/CodeGen/Mips/msa/2rf_exup.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_float_int.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_fq.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_int_float.ll | 20 +- llvm/test/CodeGen/Mips/msa/2rf_tq.ll | 8 +- llvm/test/CodeGen/Mips/msa/3r-a.ll | 192 ++--- llvm/test/CodeGen/Mips/msa/3r-b.ll | 96 +-- 
llvm/test/CodeGen/Mips/msa/3r-c.ll | 80 +- llvm/test/CodeGen/Mips/msa/3r-d.ll | 88 +- llvm/test/CodeGen/Mips/msa/3r-i.ll | 64 +- llvm/test/CodeGen/Mips/msa/3r-m.ll | 160 ++-- llvm/test/CodeGen/Mips/msa/3r-p.ll | 32 +- llvm/test/CodeGen/Mips/msa/3r-s.ll | 248 +++--- llvm/test/CodeGen/Mips/msa/3r-v.ll | 24 +- llvm/test/CodeGen/Mips/msa/3r_4r.ll | 48 +- llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll | 72 +- llvm/test/CodeGen/Mips/msa/3r_splat.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf.ll | 96 +-- llvm/test/CodeGen/Mips/msa/3rf_4rf.ll | 24 +- llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll | 48 +- llvm/test/CodeGen/Mips/msa/3rf_exdo.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf_float_int.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf_int_float.ll | 176 ++-- llvm/test/CodeGen/Mips/msa/3rf_q.ll | 16 +- llvm/test/CodeGen/Mips/msa/arithmetic.ll | 176 ++-- .../test/CodeGen/Mips/msa/arithmetic_float.ll | 88 +- .../test/CodeGen/Mips/msa/basic_operations.ll | 72 +- .../Mips/msa/basic_operations_float.ll | 34 +- llvm/test/CodeGen/Mips/msa/bit.ll | 56 +- llvm/test/CodeGen/Mips/msa/bitcast.ll | 98 +-- llvm/test/CodeGen/Mips/msa/bitwise.ll | 310 +++---- llvm/test/CodeGen/Mips/msa/compare.ll | 408 +++++----- llvm/test/CodeGen/Mips/msa/compare_float.ll | 156 ++-- llvm/test/CodeGen/Mips/msa/elm_copy.ll | 16 +- llvm/test/CodeGen/Mips/msa/elm_insv.ll | 32 +- llvm/test/CodeGen/Mips/msa/elm_move.ll | 2 +- llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll | 24 +- llvm/test/CodeGen/Mips/msa/frameindex.ll | 46 +- llvm/test/CodeGen/Mips/msa/i10.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5-a.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5-b.ll | 56 +- llvm/test/CodeGen/Mips/msa/i5-c.ll | 40 +- llvm/test/CodeGen/Mips/msa/i5-m.ll | 32 +- llvm/test/CodeGen/Mips/msa/i5-s.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5_ld_st.ll | 8 +- llvm/test/CodeGen/Mips/msa/i8.ll | 26 +- llvm/test/CodeGen/Mips/msa/inline-asm.ll | 4 +- .../Mips/msa/llvm-stress-s1704963983.ll | 22 +- .../Mips/msa/llvm-stress-s1935737938.ll | 22 +- .../Mips/msa/llvm-stress-s2704903805.ll | 22 +- .../Mips/msa/llvm-stress-s3861334421.ll | 22 +- .../Mips/msa/llvm-stress-s3926023935.ll | 22 +- .../Mips/msa/llvm-stress-s3997499501.ll | 22 +- .../Mips/msa/llvm-stress-s525530439.ll | 22 +- .../Mips/msa/llvm-stress-s997348632.ll | 22 +- llvm/test/CodeGen/Mips/msa/shuffle.ll | 166 ++-- llvm/test/CodeGen/Mips/msa/spill.ll | 272 +++---- llvm/test/CodeGen/Mips/msa/vec.ll | 184 ++--- llvm/test/CodeGen/Mips/msa/vecs10.ll | 4 +- llvm/test/CodeGen/Mips/mul.ll | 4 +- llvm/test/CodeGen/Mips/mulll.ll | 4 +- llvm/test/CodeGen/Mips/mulull.ll | 4 +- llvm/test/CodeGen/Mips/nacl-align.ll | 2 +- llvm/test/CodeGen/Mips/nacl-branch-delay.ll | 2 +- llvm/test/CodeGen/Mips/nacl-reserved-regs.ll | 32 +- llvm/test/CodeGen/Mips/neg1.ll | 2 +- llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll | 8 +- llvm/test/CodeGen/Mips/nomips16.ll | 4 +- llvm/test/CodeGen/Mips/not1.ll | 2 +- llvm/test/CodeGen/Mips/o32_cc_byval.ll | 22 +- llvm/test/CodeGen/Mips/o32_cc_vararg.ll | 20 +- llvm/test/CodeGen/Mips/optimize-pic-o0.ll | 6 +- llvm/test/CodeGen/Mips/or1.ll | 4 +- llvm/test/CodeGen/Mips/prevent-hoisting.ll | 12 +- llvm/test/CodeGen/Mips/private.ll | 2 +- llvm/test/CodeGen/Mips/ra-allocatable.ll | 242 +++--- llvm/test/CodeGen/Mips/rdhwr-directives.ll | 2 +- llvm/test/CodeGen/Mips/rem.ll | 4 +- llvm/test/CodeGen/Mips/remu.ll | 4 +- llvm/test/CodeGen/Mips/s2rem.ll | 4 +- llvm/test/CodeGen/Mips/sb1.ll | 6 +- llvm/test/CodeGen/Mips/sel1c.ll | 4 +- llvm/test/CodeGen/Mips/sel2c.ll | 4 +- llvm/test/CodeGen/Mips/selTBteqzCmpi.ll | 6 +- 
llvm/test/CodeGen/Mips/selTBtnezCmpi.ll | 6 +- llvm/test/CodeGen/Mips/selTBtnezSlti.ll | 6 +- llvm/test/CodeGen/Mips/select.ll | 12 +- llvm/test/CodeGen/Mips/seleq.ll | 32 +- llvm/test/CodeGen/Mips/seleqk.ll | 24 +- llvm/test/CodeGen/Mips/selgek.ll | 24 +- llvm/test/CodeGen/Mips/selgt.ll | 34 +- llvm/test/CodeGen/Mips/selle.ll | 32 +- llvm/test/CodeGen/Mips/selltk.ll | 24 +- llvm/test/CodeGen/Mips/selne.ll | 32 +- llvm/test/CodeGen/Mips/selnek.ll | 32 +- llvm/test/CodeGen/Mips/selpat.ll | 136 ++-- llvm/test/CodeGen/Mips/seteq.ll | 4 +- llvm/test/CodeGen/Mips/seteqz.ll | 4 +- llvm/test/CodeGen/Mips/setge.ll | 6 +- llvm/test/CodeGen/Mips/setgek.ll | 2 +- llvm/test/CodeGen/Mips/setle.ll | 6 +- llvm/test/CodeGen/Mips/setlt.ll | 4 +- llvm/test/CodeGen/Mips/setltk.ll | 2 +- llvm/test/CodeGen/Mips/setne.ll | 4 +- llvm/test/CodeGen/Mips/setuge.ll | 6 +- llvm/test/CodeGen/Mips/setugt.ll | 4 +- llvm/test/CodeGen/Mips/setule.ll | 6 +- llvm/test/CodeGen/Mips/setult.ll | 4 +- llvm/test/CodeGen/Mips/setultk.ll | 2 +- llvm/test/CodeGen/Mips/sh1.ll | 6 +- llvm/test/CodeGen/Mips/simplebr.ll | 2 +- llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll | 2 +- llvm/test/CodeGen/Mips/sll1.ll | 4 +- llvm/test/CodeGen/Mips/sll2.ll | 6 +- .../CodeGen/Mips/small-section-reserve-gp.ll | 2 +- llvm/test/CodeGen/Mips/spill-copy-acreg.ll | 6 +- llvm/test/CodeGen/Mips/sra1.ll | 2 +- llvm/test/CodeGen/Mips/sra2.ll | 4 +- llvm/test/CodeGen/Mips/srl1.ll | 4 +- llvm/test/CodeGen/Mips/srl2.ll | 6 +- llvm/test/CodeGen/Mips/stackcoloring.ll | 4 +- llvm/test/CodeGen/Mips/stchar.ll | 28 +- llvm/test/CodeGen/Mips/stldst.ll | 16 +- llvm/test/CodeGen/Mips/sub1.ll | 2 +- llvm/test/CodeGen/Mips/sub2.ll | 4 +- llvm/test/CodeGen/Mips/tailcall.ll | 20 +- llvm/test/CodeGen/Mips/tls.ll | 6 +- llvm/test/CodeGen/Mips/tls16.ll | 2 +- llvm/test/CodeGen/Mips/tls16_2.ll | 2 +- llvm/test/CodeGen/Mips/uitofp.ll | 2 +- llvm/test/CodeGen/Mips/vector-load-store.ll | 4 +- llvm/test/CodeGen/Mips/vector-setcc.ll | 4 +- llvm/test/CodeGen/Mips/xor1.ll | 4 +- llvm/test/CodeGen/Mips/zeroreg.ll | 8 +- llvm/test/CodeGen/NVPTX/access-non-generic.ll | 14 +- llvm/test/CodeGen/NVPTX/addrspacecast.ll | 16 +- llvm/test/CodeGen/NVPTX/bug21465.ll | 2 +- llvm/test/CodeGen/NVPTX/bug22322.ll | 2 +- .../CodeGen/NVPTX/call-with-alloca-buffer.ll | 8 +- llvm/test/CodeGen/NVPTX/fp16.ll | 8 +- llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll | 4 +- llvm/test/CodeGen/NVPTX/half.ll | 14 +- llvm/test/CodeGen/NVPTX/i1-global.ll | 2 +- llvm/test/CodeGen/NVPTX/i8-param.ll | 2 +- llvm/test/CodeGen/NVPTX/ld-addrspace.ll | 36 +- llvm/test/CodeGen/NVPTX/ld-generic.ll | 12 +- llvm/test/CodeGen/NVPTX/load-sext-i1.ll | 2 +- llvm/test/CodeGen/NVPTX/machine-sink.ll | 4 +- .../CodeGen/NVPTX/misaligned-vector-ldst.ll | 8 +- .../CodeGen/NVPTX/noduplicate-syncthreads.ll | 24 +- llvm/test/CodeGen/NVPTX/nounroll.ll | 2 +- llvm/test/CodeGen/NVPTX/pr13291-i1-store.ll | 2 +- llvm/test/CodeGen/NVPTX/pr16278.ll | 2 +- llvm/test/CodeGen/NVPTX/refl1.ll | 2 +- llvm/test/CodeGen/NVPTX/sched1.ll | 8 +- llvm/test/CodeGen/NVPTX/sched2.ll | 8 +- llvm/test/CodeGen/NVPTX/shift-parts.ll | 8 +- llvm/test/CodeGen/NVPTX/simple-call.ll | 2 +- llvm/test/CodeGen/NVPTX/vector-compare.ll | 4 +- llvm/test/CodeGen/NVPTX/vector-loads.ll | 12 +- llvm/test/CodeGen/NVPTX/vector-select.ll | 6 +- llvm/test/CodeGen/NVPTX/weak-global.ll | 2 +- .../PowerPC/2005-11-30-vastart-crash.ll | 2 +- .../PowerPC/2006-01-20-ShiftPartsCrash.ll | 4 +- .../CodeGen/PowerPC/2006-04-05-splat-ish.ll | 2 +- .../PowerPC/2006-05-12-rlwimi-crash.ll | 14 +- 
.../PowerPC/2006-07-07-ComputeMaskedBits.ll | 8 +- .../PowerPC/2006-07-19-stwbrx-crash.ll | 2 +- .../PowerPC/2006-08-15-SelectionCrash.ll | 2 +- .../CodeGen/PowerPC/2006-12-07-SelectCrash.ll | 2 +- .../CodeGen/PowerPC/2007-01-15-AsmDialect.ll | 10 +- .../test/CodeGen/PowerPC/2007-03-24-cntlzd.ll | 2 +- .../PowerPC/2007-03-30-SpillerCrash.ll | 242 +++--- .../2007-04-30-InlineAsmEarlyClobber.ll | 2 +- .../CodeGen/PowerPC/2007-05-22-tailmerge-3.ll | 8 +- .../PowerPC/2007-09-07-LoadStoreIdxForms.ll | 2 +- .../CodeGen/PowerPC/2007-09-08-unaligned.ll | 12 +- .../PowerPC/2007-10-18-PtrArithmetic.ll | 4 +- .../PowerPC/2007-10-21-LocalRegAllocAssert.ll | 4 +- .../2007-10-21-LocalRegAllocAssert2.ll | 8 +- .../PowerPC/2007-11-16-landingpad-split.ll | 2 +- .../PowerPC/2007-11-19-VectorSplitting.ll | 2 +- .../PowerPC/2008-02-09-LocalRegAllocAssert.ll | 2 +- .../PowerPC/2008-03-05-RegScavengerAssert.ll | 2 +- .../PowerPC/2008-03-17-RegScavengerCrash.ll | 2 +- .../PowerPC/2008-03-24-AddressRegImm.ll | 2 +- .../PowerPC/2008-03-26-CoalescerBug.ll | 2 +- .../PowerPC/2008-04-23-CoalescerCrash.ll | 4 +- .../PowerPC/2008-06-21-F128LoadStore.ll | 2 +- .../PowerPC/2008-06-23-LiveVariablesCrash.ll | 2 +- llvm/test/CodeGen/PowerPC/2008-07-15-Bswap.ll | 40 +- .../PowerPC/2008-07-15-SignExtendInreg.ll | 2 +- .../PowerPC/2008-09-12-CoalescerBug.ll | 46 +- .../PowerPC/2008-10-28-UnprocessedNode.ll | 2 +- .../PowerPC/2008-10-31-PPCF128Libcalls.ll | 10 +- ...009-08-17-inline-asm-addr-mode-breakage.ll | 2 +- .../PowerPC/2010-03-09-indirect-call.ll | 2 +- .../PowerPC/2010-12-18-PPCStackRefs.ll | 4 +- .../PowerPC/2011-12-05-NoSpillDupCR.ll | 20 +- .../PowerPC/2011-12-06-SpillAndRestoreCR.ll | 34 +- .../2011-12-08-DemandedBitsMiscompile.ll | 2 +- llvm/test/CodeGen/PowerPC/Atomics-64.ll | 160 ++-- llvm/test/CodeGen/PowerPC/a2-fp-basic.ll | 12 +- llvm/test/CodeGen/PowerPC/addi-licm.ll | 4 +- llvm/test/CodeGen/PowerPC/addi-reassoc.ll | 4 +- llvm/test/CodeGen/PowerPC/alias.ll | 4 +- llvm/test/CodeGen/PowerPC/and-elim.ll | 2 +- llvm/test/CodeGen/PowerPC/anon_aggr.ll | 8 +- llvm/test/CodeGen/PowerPC/asm-constraints.ll | 2 +- llvm/test/CodeGen/PowerPC/atomic-2.ll | 2 +- llvm/test/CodeGen/PowerPC/atomics-indexed.ll | 8 +- llvm/test/CodeGen/PowerPC/atomics.ll | 8 +- llvm/test/CodeGen/PowerPC/bdzlr.ll | 4 +- llvm/test/CodeGen/PowerPC/bswap-load-store.ll | 6 +- .../CodeGen/PowerPC/buildvec_canonicalize.ll | 4 +- llvm/test/CodeGen/PowerPC/byval-aliased.ll | 2 +- llvm/test/CodeGen/PowerPC/code-align.ll | 6 +- llvm/test/CodeGen/PowerPC/complex-return.ll | 12 +- llvm/test/CodeGen/PowerPC/cr-spills.ll | 94 +-- llvm/test/CodeGen/PowerPC/crbits.ll | 2 +- llvm/test/CodeGen/PowerPC/crsave.ll | 4 +- llvm/test/CodeGen/PowerPC/ctrloop-cpsgn.ll | 2 +- llvm/test/CodeGen/PowerPC/ctrloop-fp64.ll | 4 +- llvm/test/CodeGen/PowerPC/ctrloop-i64.ll | 8 +- llvm/test/CodeGen/PowerPC/ctrloop-le.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-lt.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-ne.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-s000.ll | 32 +- llvm/test/CodeGen/PowerPC/ctrloop-sh.ll | 12 +- llvm/test/CodeGen/PowerPC/ctrloop-sums.ll | 4 +- llvm/test/CodeGen/PowerPC/ctrloops.ll | 6 +- llvm/test/CodeGen/PowerPC/dcbt-sched.ll | 4 +- llvm/test/CodeGen/PowerPC/delete-node.ll | 4 +- .../CodeGen/PowerPC/dyn-alloca-aligned.ll | 4 +- llvm/test/CodeGen/PowerPC/emptystruct.ll | 2 +- llvm/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll | 8 +- .../CodeGen/PowerPC/fast-isel-GEP-coalesce.ll | 8 +- llvm/test/CodeGen/PowerPC/fast-isel-call.ll | 2 +- 
llvm/test/CodeGen/PowerPC/fast-isel-fold.ll | 26 +- .../CodeGen/PowerPC/fast-isel-load-store.ll | 18 +- .../CodeGen/PowerPC/fast-isel-redefinition.ll | 2 +- .../fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/PowerPC/floatPSA.ll | 30 +- llvm/test/CodeGen/PowerPC/flt-preinc.ll | 8 +- llvm/test/CodeGen/PowerPC/fp-to-int-ext.ll | 8 +- llvm/test/CodeGen/PowerPC/frounds.ll | 4 +- .../CodeGen/PowerPC/glob-comp-aa-crash.ll | 8 +- llvm/test/CodeGen/PowerPC/hidden-vis-2.ll | 4 +- llvm/test/CodeGen/PowerPC/hidden-vis.ll | 2 +- llvm/test/CodeGen/PowerPC/ia-mem-r0.ll | 6 +- llvm/test/CodeGen/PowerPC/indexed-load.ll | 2 +- llvm/test/CodeGen/PowerPC/indirectbr.ll | 4 +- .../test/CodeGen/PowerPC/inlineasm-i64-reg.ll | 20 +- llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll | 2 +- .../test/CodeGen/PowerPC/lbz-from-ld-shift.ll | 2 +- llvm/test/CodeGen/PowerPC/lbzux.ll | 4 +- llvm/test/CodeGen/PowerPC/ld-st-upd.ll | 2 +- llvm/test/CodeGen/PowerPC/ldtoc-inv.ll | 2 +- llvm/test/CodeGen/PowerPC/lha.ll | 2 +- .../CodeGen/PowerPC/load-constant-addr.ll | 2 +- .../CodeGen/PowerPC/load-shift-combine.ll | 12 +- .../CodeGen/PowerPC/loop-data-prefetch.ll | 2 +- llvm/test/CodeGen/PowerPC/lsa.ll | 4 +- llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll | 4 +- llvm/test/CodeGen/PowerPC/mask64.ll | 4 +- llvm/test/CodeGen/PowerPC/mcm-1.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-10.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-11.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-2.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-3.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-5.ll | 12 +- llvm/test/CodeGen/PowerPC/mcm-6.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-7.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-8.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-9.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-default.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-obj-2.ll | 4 +- llvm/test/CodeGen/PowerPC/mcm-obj.ll | 22 +- llvm/test/CodeGen/PowerPC/mem-rr-addr-mode.ll | 6 +- llvm/test/CodeGen/PowerPC/mem_update.ll | 12 +- .../PowerPC/misched-inorder-latency.ll | 4 +- .../PowerPC/mult-alt-generic-powerpc.ll | 38 +- .../PowerPC/mult-alt-generic-powerpc64.ll | 38 +- .../CodeGen/PowerPC/no-extra-fp-conv-ldst.ll | 4 +- llvm/test/CodeGen/PowerPC/novrsave.ll | 2 +- .../CodeGen/PowerPC/or-addressing-mode.ll | 4 +- llvm/test/CodeGen/PowerPC/post-ra-ec.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc-prologue.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc32-pic.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll | 12 +- llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll | 8 +- .../PowerPC/ppc64-align-long-double.ll | 2 +- .../test/CodeGen/PowerPC/ppc64-byval-align.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64-calls.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64-gep-opt.ll | 8 +- llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll | 6 +- llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc64-toc.ll | 6 +- .../CodeGen/PowerPC/ppc64le-aggregates.ll | 14 +- .../CodeGen/PowerPC/ppc64le-localentry.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll | 4 +- llvm/test/CodeGen/PowerPC/ppcf128-1.ll | 32 +- llvm/test/CodeGen/PowerPC/ppcf128-endian.ll | 6 +- llvm/test/CodeGen/PowerPC/pr13891.ll | 2 +- llvm/test/CodeGen/PowerPC/pr15031.ll | 14 +- llvm/test/CodeGen/PowerPC/pr15630.ll | 2 +- llvm/test/CodeGen/PowerPC/pr16556-2.ll | 6 +- llvm/test/CodeGen/PowerPC/pr17168.ll | 2 +- llvm/test/CodeGen/PowerPC/pr18663.ll | 18 +- llvm/test/CodeGen/PowerPC/pr20442.ll | 10 +- .../test/CodeGen/PowerPC/preincprep-invoke.ll | 2 
+- llvm/test/CodeGen/PowerPC/private.ll | 2 +- llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll | 6 +- llvm/test/CodeGen/PowerPC/qpx-load.ll | 4 +- llvm/test/CodeGen/PowerPC/qpx-s-load.ll | 4 +- llvm/test/CodeGen/PowerPC/qpx-s-sel.ll | 2 +- llvm/test/CodeGen/PowerPC/qpx-sel.ll | 2 +- llvm/test/CodeGen/PowerPC/qpx-unalperm.ll | 24 +- llvm/test/CodeGen/PowerPC/quadint-return.ll | 2 +- .../CodeGen/PowerPC/reg-coalesce-simple.ll | 2 +- llvm/test/CodeGen/PowerPC/reloc-align.ll | 2 +- .../test/CodeGen/PowerPC/resolvefi-basereg.ll | 140 ++-- llvm/test/CodeGen/PowerPC/resolvefi-disp.ll | 8 +- llvm/test/CodeGen/PowerPC/return-val-i128.ll | 10 +- llvm/test/CodeGen/PowerPC/rlwimi-and.ll | 4 +- llvm/test/CodeGen/PowerPC/rlwimi-commute.ll | 8 +- llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll | 8 +- llvm/test/CodeGen/PowerPC/rm-zext.ll | 4 +- llvm/test/CodeGen/PowerPC/rs-undef-use.ll | 6 +- .../CodeGen/PowerPC/s000-alias-misched.ll | 8 +- llvm/test/CodeGen/PowerPC/sjlj.ll | 4 +- llvm/test/CodeGen/PowerPC/small-arguments.ll | 6 +- llvm/test/CodeGen/PowerPC/split-index-tc.ll | 6 +- llvm/test/CodeGen/PowerPC/stack-protector.ll | 2 +- llvm/test/CodeGen/PowerPC/stack-realign.ll | 14 +- llvm/test/CodeGen/PowerPC/std-unal-fi.ll | 14 +- llvm/test/CodeGen/PowerPC/store-load-fwd.ll | 2 +- llvm/test/CodeGen/PowerPC/structsinmem.ll | 28 +- llvm/test/CodeGen/PowerPC/structsinregs.ll | 28 +- llvm/test/CodeGen/PowerPC/subreg-postra-2.ll | 4 +- llvm/test/CodeGen/PowerPC/subreg-postra.ll | 6 +- .../CodeGen/PowerPC/subsumes-pred-regs.ll | 2 +- llvm/test/CodeGen/PowerPC/tls-cse.ll | 4 +- llvm/test/CodeGen/PowerPC/tls-pic.ll | 4 +- llvm/test/CodeGen/PowerPC/tls.ll | 2 +- .../CodeGen/PowerPC/toc-load-sched-bug.ll | 50 +- llvm/test/CodeGen/PowerPC/trampoline.ll | 44 +- .../test/CodeGen/PowerPC/unal-altivec-wint.ll | 4 +- llvm/test/CodeGen/PowerPC/unal-altivec.ll | 4 +- llvm/test/CodeGen/PowerPC/unal-altivec2.ll | 34 +- llvm/test/CodeGen/PowerPC/unaligned.ll | 12 +- llvm/test/CodeGen/PowerPC/vaddsplat.ll | 24 +- .../CodeGen/PowerPC/varargs-struct-float.ll | 2 +- llvm/test/CodeGen/PowerPC/vcmp-fold.ll | 8 +- llvm/test/CodeGen/PowerPC/vec-abi-align.ll | 8 +- .../test/CodeGen/PowerPC/vec_auto_constant.ll | 4 +- llvm/test/CodeGen/PowerPC/vec_br_cmp.ll | 4 +- .../PowerPC/vec_buildvector_loadstore.ll | 2 +- llvm/test/CodeGen/PowerPC/vec_constants.ll | 6 +- llvm/test/CodeGen/PowerPC/vec_conv.ll | 8 +- llvm/test/CodeGen/PowerPC/vec_fneg.ll | 2 +- llvm/test/CodeGen/PowerPC/vec_misaligned.ll | 8 +- llvm/test/CodeGen/PowerPC/vec_mul.ll | 16 +- llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll | 20 +- llvm/test/CodeGen/PowerPC/vec_shuffle.ll | 56 +- llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll | 54 +- llvm/test/CodeGen/PowerPC/vec_splat.ll | 10 +- .../CodeGen/PowerPC/vec_splat_constant.ll | 4 +- llvm/test/CodeGen/PowerPC/vec_zero.ll | 2 +- .../PowerPC/vector-identity-shuffle.ll | 2 +- llvm/test/CodeGen/PowerPC/vector.ll | 46 +- llvm/test/CodeGen/PowerPC/vsx-div.ll | 4 +- llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll | 36 +- llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll | 18 +- .../CodeGen/PowerPC/vsx-ldst-builtin-le.ll | 72 +- llvm/test/CodeGen/PowerPC/vsx-ldst.ll | 12 +- llvm/test/CodeGen/PowerPC/vsx-minmax.ll | 22 +- llvm/test/CodeGen/PowerPC/vsx-p8.ll | 4 +- llvm/test/CodeGen/PowerPC/vsx.ll | 14 +- .../CodeGen/PowerPC/vsx_insert_extract_le.ll | 12 +- llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll | 64 +- .../CodeGen/PowerPC/weak_def_can_be_hidden.ll | 4 +- llvm/test/CodeGen/PowerPC/zero-not-run.ll | 2 +- llvm/test/CodeGen/PowerPC/zext-free.ll | 6 +- 
.../R600/32-bit-local-address-space.ll | 12 +- llvm/test/CodeGen/R600/add-debug.ll | 2 +- llvm/test/CodeGen/R600/add.ll | 16 +- llvm/test/CodeGen/R600/add_i64.ll | 12 +- llvm/test/CodeGen/R600/address-space.ll | 4 +- llvm/test/CodeGen/R600/and.ll | 30 +- llvm/test/CodeGen/R600/array-ptr-calc-i32.ll | 6 +- llvm/test/CodeGen/R600/array-ptr-calc-i64.ll | 4 +- llvm/test/CodeGen/R600/big_alu.ll | 40 +- llvm/test/CodeGen/R600/bitcast.ll | 16 +- llvm/test/CodeGen/R600/bswap.ll | 14 +- llvm/test/CodeGen/R600/call.ll | 8 +- llvm/test/CodeGen/R600/combine_vloads.ll | 2 +- llvm/test/CodeGen/R600/commute_modifiers.ll | 32 +- llvm/test/CodeGen/R600/copy-illegal-type.ll | 18 +- llvm/test/CodeGen/R600/copy-to-reg.ll | 2 +- llvm/test/CodeGen/R600/ctlz_zero_undef.ll | 6 +- llvm/test/CodeGen/R600/ctpop.ll | 32 +- llvm/test/CodeGen/R600/ctpop64.ll | 8 +- llvm/test/CodeGen/R600/cttz_zero_undef.ll | 6 +- llvm/test/CodeGen/R600/cvt_f32_ubyte.ll | 24 +- .../dagcombiner-bug-illegal-vec4-int-to-fp.ll | 4 +- llvm/test/CodeGen/R600/dot4-folding.ll | 4 +- ...ds-negative-offset-addressing-mode-loop.ll | 10 +- llvm/test/CodeGen/R600/ds_read2.ll | 120 +-- .../CodeGen/R600/ds_read2_offset_order.ll | 14 +- llvm/test/CodeGen/R600/ds_read2st64.ll | 52 +- llvm/test/CodeGen/R600/ds_write2.ll | 52 +- llvm/test/CodeGen/R600/ds_write2st64.ll | 16 +- llvm/test/CodeGen/R600/extload-private.ll | 8 +- llvm/test/CodeGen/R600/extload.ll | 8 +- llvm/test/CodeGen/R600/fabs.f64.ll | 2 +- llvm/test/CodeGen/R600/fadd.ll | 4 +- llvm/test/CodeGen/R600/fadd64.ll | 4 +- llvm/test/CodeGen/R600/fcmp-cnd.ll | 2 +- llvm/test/CodeGen/R600/fcmp-cnde-int-args.ll | 2 +- llvm/test/CodeGen/R600/fcmp.ll | 4 +- llvm/test/CodeGen/R600/fcmp64.ll | 24 +- llvm/test/CodeGen/R600/fconst64.ll | 2 +- llvm/test/CodeGen/R600/fdiv.f64.ll | 16 +- llvm/test/CodeGen/R600/fdiv.ll | 4 +- llvm/test/CodeGen/R600/fetch-limits.r600.ll | 18 +- llvm/test/CodeGen/R600/fetch-limits.r700+.ll | 34 +- llvm/test/CodeGen/R600/flat-address-space.ll | 18 +- llvm/test/CodeGen/R600/fma-combine.ll | 90 +- llvm/test/CodeGen/R600/fma.f64.ll | 18 +- llvm/test/CodeGen/R600/fma.ll | 26 +- llvm/test/CodeGen/R600/fmax3.ll | 12 +- llvm/test/CodeGen/R600/fmax_legacy.f64.ll | 16 +- llvm/test/CodeGen/R600/fmax_legacy.ll | 20 +- llvm/test/CodeGen/R600/fmin3.ll | 12 +- llvm/test/CodeGen/R600/fmin_legacy.f64.ll | 16 +- llvm/test/CodeGen/R600/fmin_legacy.ll | 20 +- llvm/test/CodeGen/R600/fmul.ll | 4 +- llvm/test/CodeGen/R600/fmul64.ll | 12 +- llvm/test/CodeGen/R600/fmuladd.ll | 44 +- llvm/test/CodeGen/R600/fneg-fabs.f64.ll | 4 +- llvm/test/CodeGen/R600/fneg-fabs.ll | 2 +- llvm/test/CodeGen/R600/fp16_to_fp.ll | 4 +- llvm/test/CodeGen/R600/fp32_to_fp16.ll | 2 +- llvm/test/CodeGen/R600/fp_to_sint.f64.ll | 2 +- llvm/test/CodeGen/R600/fp_to_sint.ll | 2 +- llvm/test/CodeGen/R600/fp_to_uint.f64.ll | 2 +- llvm/test/CodeGen/R600/fp_to_uint.ll | 2 +- llvm/test/CodeGen/R600/frem.ll | 28 +- llvm/test/CodeGen/R600/fsqrt.ll | 4 +- llvm/test/CodeGen/R600/fsub.ll | 8 +- llvm/test/CodeGen/R600/fsub64.ll | 16 +- llvm/test/CodeGen/R600/ftrunc.f64.ll | 2 +- llvm/test/CodeGen/R600/global-directive.ll | 4 +- llvm/test/CodeGen/R600/global-extload-i1.ll | 64 +- llvm/test/CodeGen/R600/global-extload-i16.ll | 64 +- llvm/test/CodeGen/R600/global-extload-i32.ll | 28 +- llvm/test/CodeGen/R600/global-extload-i8.ll | 64 +- .../CodeGen/R600/global-zero-initializer.ll | 2 +- .../CodeGen/R600/gv-const-addrspace-fail.ll | 8 +- llvm/test/CodeGen/R600/gv-const-addrspace.ll | 10 +- llvm/test/CodeGen/R600/half.ll | 12 +- 
.../CodeGen/R600/i8-to-double-to-float.ll | 2 +- .../R600/icmp-select-sete-reverse-args.ll | 4 +- llvm/test/CodeGen/R600/imm.ll | 4 +- llvm/test/CodeGen/R600/indirect-private-64.ll | 16 +- llvm/test/CodeGen/R600/insert_vector_elt.ll | 4 +- llvm/test/CodeGen/R600/jump-address.ll | 6 +- llvm/test/CodeGen/R600/kcache-fold.ll | 48 +- llvm/test/CodeGen/R600/large-alloca.ll | 2 +- .../R600/large-constant-initializer.ll | 2 +- llvm/test/CodeGen/R600/lds-initializer.ll | 2 +- llvm/test/CodeGen/R600/lds-oqap-crash.ll | 2 +- llvm/test/CodeGen/R600/lds-output-queue.ll | 12 +- .../test/CodeGen/R600/lds-zero-initializer.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.abs.ll | 4 +- .../R600/llvm.AMDGPU.barrier.global.ll | 2 +- .../CodeGen/R600/llvm.AMDGPU.barrier.local.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll | 24 +- llvm/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll | 44 +- llvm/test/CodeGen/R600/llvm.AMDGPU.brev.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.class.ll | 24 +- llvm/test/CodeGen/R600/llvm.AMDGPU.cube.ll | 8 +- .../CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll | 8 +- .../test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll | 14 +- .../CodeGen/R600/llvm.AMDGPU.div_scale.ll | 44 +- llvm/test/CodeGen/R600/llvm.AMDGPU.fract.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.imax.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.imin.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.tex.ll | 2 +- .../CodeGen/R600/llvm.AMDGPU.trig_preop.ll | 6 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umad24.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umax.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umin.ll | 4 +- llvm/test/CodeGen/R600/llvm.SI.imageload.ll | 10 +- llvm/test/CodeGen/R600/llvm.SI.load.dword.ll | 2 +- llvm/test/CodeGen/R600/llvm.amdgpu.dp4.ll | 4 +- llvm/test/CodeGen/R600/llvm.round.f64.ll | 2 +- llvm/test/CodeGen/R600/load-i1.ll | 14 +- llvm/test/CodeGen/R600/load-input-fold.ll | 38 +- llvm/test/CodeGen/R600/load.ll | 92 +-- llvm/test/CodeGen/R600/load.vec.ll | 4 +- llvm/test/CodeGen/R600/load64.ll | 6 +- llvm/test/CodeGen/R600/local-64.ll | 16 +- .../CodeGen/R600/local-memory-two-objects.ll | 4 +- llvm/test/CodeGen/R600/local-memory.ll | 2 +- llvm/test/CodeGen/R600/loop-idiom.ll | 2 +- llvm/test/CodeGen/R600/m0-spill.ll | 2 +- llvm/test/CodeGen/R600/mad-combine.ll | 110 +-- llvm/test/CodeGen/R600/mad-sub.ll | 50 +- llvm/test/CodeGen/R600/madak.ll | 28 +- llvm/test/CodeGen/R600/madmk.ll | 28 +- llvm/test/CodeGen/R600/max.ll | 16 +- llvm/test/CodeGen/R600/max3.ll | 12 +- llvm/test/CodeGen/R600/min.ll | 20 +- llvm/test/CodeGen/R600/min3.ll | 28 +- llvm/test/CodeGen/R600/missing-store.ll | 4 +- llvm/test/CodeGen/R600/mubuf.ll | 12 +- llvm/test/CodeGen/R600/mul.ll | 28 +- .../R600/no-initializer-constant-addrspace.ll | 4 +- llvm/test/CodeGen/R600/no-shrink-extloads.ll | 16 +- llvm/test/CodeGen/R600/or.ll | 28 +- .../CodeGen/R600/parallelandifcollapse.ll | 16 +- .../test/CodeGen/R600/parallelorifcollapse.ll | 16 +- llvm/test/CodeGen/R600/private-memory.ll | 46 +- llvm/test/CodeGen/R600/pv-packing.ll | 4 +- llvm/test/CodeGen/R600/pv.ll | 68 +- llvm/test/CodeGen/R600/r600-export-fix.ll | 50 +- llvm/test/CodeGen/R600/r600cfg.ll | 2 +- .../CodeGen/R600/register-count-comments.ll | 4 +- llvm/test/CodeGen/R600/reorder-stores.ll | 16 +- llvm/test/CodeGen/R600/rotl.i64.ll | 4 +- llvm/test/CodeGen/R600/rotr.i64.ll | 8 +- llvm/test/CodeGen/R600/rsq.ll | 10 +- llvm/test/CodeGen/R600/s_movk_i32.ll | 26 +- llvm/test/CodeGen/R600/saddo.ll | 8 +- llvm/test/CodeGen/R600/salu-to-valu.ll | 16 +- llvm/test/CodeGen/R600/scalar_to_vector.ll | 6 +- 
.../CodeGen/R600/schedule-fs-loop-nested.ll | 8 +- llvm/test/CodeGen/R600/schedule-fs-loop.ll | 8 +- .../CodeGen/R600/schedule-global-loads.ll | 8 +- llvm/test/CodeGen/R600/schedule-if-2.ll | 8 +- llvm/test/CodeGen/R600/schedule-if.ll | 6 +- .../schedule-vs-if-nested-loop-failure.ll | 32 +- .../R600/schedule-vs-if-nested-loop.ll | 32 +- llvm/test/CodeGen/R600/scratch-buffer.ll | 12 +- llvm/test/CodeGen/R600/sdiv.ll | 20 +- llvm/test/CodeGen/R600/sdivrem24.ll | 48 +- llvm/test/CodeGen/R600/select64.ll | 8 +- llvm/test/CodeGen/R600/selectcc-cnd.ll | 2 +- llvm/test/CodeGen/R600/selectcc-cnde-int.ll | 2 +- .../R600/selectcc-icmp-select-float.ll | 2 +- llvm/test/CodeGen/R600/setcc-opt.ll | 2 +- llvm/test/CodeGen/R600/setcc.ll | 12 +- llvm/test/CodeGen/R600/sext-in-reg.ll | 44 +- llvm/test/CodeGen/R600/sgpr-control-flow.ll | 4 +- .../R600/sgpr-copy-duplicate-operand.ll | 2 +- llvm/test/CodeGen/R600/sgpr-copy.ll | 24 +- llvm/test/CodeGen/R600/shl.ll | 20 +- llvm/test/CodeGen/R600/shl_add_constant.ll | 6 +- llvm/test/CodeGen/R600/shl_add_ptr.ll | 12 +- llvm/test/CodeGen/R600/si-lod-bias.ll | 6 +- llvm/test/CodeGen/R600/si-sgpr-spill.ll | 96 +-- .../R600/si-triv-disjoint-mem-access.ll | 56 +- llvm/test/CodeGen/R600/si-vector-hang.ll | 32 +- llvm/test/CodeGen/R600/sign_extend.ll | 2 +- .../R600/simplify-demanded-bits-build-pair.ll | 2 +- llvm/test/CodeGen/R600/sint_to_fp.f64.ll | 2 +- llvm/test/CodeGen/R600/sint_to_fp.ll | 2 +- llvm/test/CodeGen/R600/smrd.ll | 14 +- .../test/CodeGen/R600/split-scalar-i64-add.ll | 2 +- llvm/test/CodeGen/R600/sra.ll | 20 +- llvm/test/CodeGen/R600/srem.ll | 38 +- llvm/test/CodeGen/R600/srl.ll | 24 +- llvm/test/CodeGen/R600/ssubo.ll | 8 +- llvm/test/CodeGen/R600/store-barrier.ll | 10 +- llvm/test/CodeGen/R600/store.ll | 4 +- llvm/test/CodeGen/R600/store.r600.ll | 4 +- llvm/test/CodeGen/R600/sub.ll | 24 +- llvm/test/CodeGen/R600/swizzle-export.ll | 42 +- llvm/test/CodeGen/R600/trunc-cmp-constant.ll | 26 +- llvm/test/CodeGen/R600/trunc.ll | 4 +- llvm/test/CodeGen/R600/uaddo.ll | 8 +- llvm/test/CodeGen/R600/udiv.ll | 12 +- llvm/test/CodeGen/R600/udivrem24.ll | 48 +- llvm/test/CodeGen/R600/uint_to_fp.f64.ll | 2 +- llvm/test/CodeGen/R600/uint_to_fp.ll | 2 +- .../test/CodeGen/R600/unaligned-load-store.ll | 24 +- .../unhandled-loop-condition-assertion.ll | 30 +- llvm/test/CodeGen/R600/unroll.ll | 2 +- llvm/test/CodeGen/R600/urem.ll | 26 +- llvm/test/CodeGen/R600/usubo.ll | 8 +- llvm/test/CodeGen/R600/v_cndmask.ll | 2 +- llvm/test/CodeGen/R600/valu-i1.ll | 8 +- llvm/test/CodeGen/R600/vector-alloca.ll | 6 +- .../CodeGen/R600/vertex-fetch-encoding.ll | 4 +- llvm/test/CodeGen/R600/vselect.ll | 16 +- llvm/test/CodeGen/R600/vtx-fetch-branch.ll | 2 +- llvm/test/CodeGen/R600/vtx-schedule.ll | 4 +- llvm/test/CodeGen/R600/wait.ll | 6 +- llvm/test/CodeGen/R600/xor.ll | 34 +- .../2008-10-10-InlineAsmMemoryOperand.ll | 2 +- llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll | 4 +- llvm/test/CodeGen/SPARC/2011-01-11-CC.ll | 6 +- llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll | 6 +- llvm/test/CodeGen/SPARC/64abi.ll | 16 +- llvm/test/CodeGen/SPARC/64bit.ll | 16 +- llvm/test/CodeGen/SPARC/atomics.ll | 8 +- llvm/test/CodeGen/SPARC/fp128.ll | 34 +- llvm/test/CodeGen/SPARC/globals.ll | 2 +- llvm/test/CodeGen/SPARC/leafproc.ll | 2 +- .../CodeGen/SPARC/mult-alt-generic-sparc.ll | 38 +- llvm/test/CodeGen/SPARC/obj-relocs.ll | 2 +- llvm/test/CodeGen/SPARC/private.ll | 2 +- llvm/test/CodeGen/SPARC/setjmp.ll | 4 +- llvm/test/CodeGen/SPARC/spillsize.ll | 4 +- llvm/test/CodeGen/SPARC/tls.ll | 4 +- 
llvm/test/CodeGen/SPARC/varargs.ll | 2 +- llvm/test/CodeGen/SystemZ/addr-01.ll | 16 +- llvm/test/CodeGen/SystemZ/addr-02.ll | 16 +- llvm/test/CodeGen/SystemZ/addr-03.ll | 10 +- llvm/test/CodeGen/SystemZ/alias-01.ll | 2 +- llvm/test/CodeGen/SystemZ/and-01.ll | 40 +- llvm/test/CodeGen/SystemZ/and-03.ll | 34 +- llvm/test/CodeGen/SystemZ/and-05.ll | 26 +- llvm/test/CodeGen/SystemZ/and-06.ll | 16 +- llvm/test/CodeGen/SystemZ/and-08.ll | 104 +-- llvm/test/CodeGen/SystemZ/asm-18.ll | 48 +- llvm/test/CodeGen/SystemZ/atomic-load-01.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-02.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-03.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-04.ll | 2 +- llvm/test/CodeGen/SystemZ/branch-02.ll | 12 +- llvm/test/CodeGen/SystemZ/branch-03.ll | 8 +- llvm/test/CodeGen/SystemZ/branch-04.ll | 28 +- llvm/test/CodeGen/SystemZ/branch-06.ll | 14 +- llvm/test/CodeGen/SystemZ/branch-08.ll | 2 +- llvm/test/CodeGen/SystemZ/bswap-02.ll | 48 +- llvm/test/CodeGen/SystemZ/bswap-03.ll | 48 +- llvm/test/CodeGen/SystemZ/cond-load-01.ll | 18 +- llvm/test/CodeGen/SystemZ/cond-load-02.ll | 18 +- llvm/test/CodeGen/SystemZ/cond-store-01.ll | 44 +- llvm/test/CodeGen/SystemZ/cond-store-02.ll | 44 +- llvm/test/CodeGen/SystemZ/cond-store-03.ll | 36 +- llvm/test/CodeGen/SystemZ/cond-store-04.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-05.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-06.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-07.ll | 22 +- llvm/test/CodeGen/SystemZ/cond-store-08.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-abs-01.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-abs-02.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-add-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-add-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-add-03.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-cmp-01.ll | 56 +- llvm/test/CodeGen/SystemZ/fp-cmp-02.ll | 34 +- llvm/test/CodeGen/SystemZ/fp-cmp-03.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-cmp-04.ll | 6 +- llvm/test/CodeGen/SystemZ/fp-conv-01.ll | 8 +- llvm/test/CodeGen/SystemZ/fp-conv-02.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-03.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-04.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-09.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-10.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-11.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-12.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-14.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-copysign-01.ll | 12 +- llvm/test/CodeGen/SystemZ/fp-div-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-div-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-div-03.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-move-01.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-move-02.ll | 48 +- llvm/test/CodeGen/SystemZ/fp-move-03.ll | 20 +- llvm/test/CodeGen/SystemZ/fp-move-04.ll | 20 +- llvm/test/CodeGen/SystemZ/fp-move-05.ll | 22 +- llvm/test/CodeGen/SystemZ/fp-move-09.ll | 6 +- llvm/test/CodeGen/SystemZ/fp-mul-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-03.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-04.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-05.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-mul-06.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-07.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-08.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-09.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-neg-01.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-round-01.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-round-02.ll | 12 +- llvm/test/CodeGen/SystemZ/fp-sqrt-01.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-sqrt-02.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-sqrt-03.ll | 2 +- 
llvm/test/CodeGen/SystemZ/fp-sub-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-sub-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-sub-03.ll | 2 +- llvm/test/CodeGen/SystemZ/frame-02.ll | 96 +-- llvm/test/CodeGen/SystemZ/frame-03.ll | 96 +-- llvm/test/CodeGen/SystemZ/frame-04.ll | 48 +- llvm/test/CodeGen/SystemZ/frame-05.ll | 76 +- llvm/test/CodeGen/SystemZ/frame-06.ll | 76 +- llvm/test/CodeGen/SystemZ/frame-07.ll | 64 +- llvm/test/CodeGen/SystemZ/frame-08.ll | 92 +-- llvm/test/CodeGen/SystemZ/frame-09.ll | 26 +- llvm/test/CodeGen/SystemZ/frame-13.ll | 38 +- llvm/test/CodeGen/SystemZ/frame-14.ll | 38 +- llvm/test/CodeGen/SystemZ/frame-15.ll | 58 +- llvm/test/CodeGen/SystemZ/frame-16.ll | 34 +- llvm/test/CodeGen/SystemZ/frame-17.ll | 86 +- llvm/test/CodeGen/SystemZ/frame-18.ll | 60 +- llvm/test/CodeGen/SystemZ/insert-01.ll | 34 +- llvm/test/CodeGen/SystemZ/insert-02.ll | 34 +- llvm/test/CodeGen/SystemZ/insert-06.ll | 6 +- llvm/test/CodeGen/SystemZ/int-add-01.ll | 20 +- llvm/test/CodeGen/SystemZ/int-add-02.ll | 40 +- llvm/test/CodeGen/SystemZ/int-add-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-05.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-08.ll | 38 +- llvm/test/CodeGen/SystemZ/int-add-09.ll | 8 +- llvm/test/CodeGen/SystemZ/int-add-10.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-11.ll | 84 +- llvm/test/CodeGen/SystemZ/int-add-12.ll | 84 +- llvm/test/CodeGen/SystemZ/int-cmp-01.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-02.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-03.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-04.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-05.ll | 42 +- llvm/test/CodeGen/SystemZ/int-cmp-06.ll | 42 +- llvm/test/CodeGen/SystemZ/int-cmp-07.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-08.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-15.ll | 38 +- llvm/test/CodeGen/SystemZ/int-cmp-16.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-17.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-18.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-19.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-20.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-21.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-22.ll | 18 +- llvm/test/CodeGen/SystemZ/int-cmp-23.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-24.ll | 8 +- llvm/test/CodeGen/SystemZ/int-cmp-25.ll | 8 +- llvm/test/CodeGen/SystemZ/int-cmp-26.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-27.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-28.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-29.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-30.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-31.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-32.ll | 36 +- llvm/test/CodeGen/SystemZ/int-cmp-33.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-34.ll | 36 +- llvm/test/CodeGen/SystemZ/int-cmp-35.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-36.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-37.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-38.ll | 14 +- llvm/test/CodeGen/SystemZ/int-cmp-39.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-40.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-41.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-42.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-43.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-44.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-45.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-48.ll | 32 +- llvm/test/CodeGen/SystemZ/int-conv-01.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-02.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-03.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-04.ll | 46 +- 
llvm/test/CodeGen/SystemZ/int-conv-05.ll | 52 +- llvm/test/CodeGen/SystemZ/int-conv-06.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-07.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-08.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-09.ll | 14 +- llvm/test/CodeGen/SystemZ/int-conv-10.ll | 14 +- llvm/test/CodeGen/SystemZ/int-conv-11.ll | 128 +-- llvm/test/CodeGen/SystemZ/int-div-01.ll | 42 +- llvm/test/CodeGen/SystemZ/int-div-02.ll | 38 +- llvm/test/CodeGen/SystemZ/int-div-03.ll | 20 +- llvm/test/CodeGen/SystemZ/int-div-04.ll | 40 +- llvm/test/CodeGen/SystemZ/int-div-05.ll | 40 +- llvm/test/CodeGen/SystemZ/int-move-02.ll | 20 +- llvm/test/CodeGen/SystemZ/int-move-03.ll | 14 +- llvm/test/CodeGen/SystemZ/int-move-08.ll | 16 +- llvm/test/CodeGen/SystemZ/int-move-09.ll | 20 +- llvm/test/CodeGen/SystemZ/int-mul-01.ll | 20 +- llvm/test/CodeGen/SystemZ/int-mul-02.ll | 40 +- llvm/test/CodeGen/SystemZ/int-mul-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-mul-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-mul-08.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-01.ll | 40 +- llvm/test/CodeGen/SystemZ/int-sub-02.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-05.ll | 38 +- llvm/test/CodeGen/SystemZ/int-sub-06.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-07.ll | 20 +- llvm/test/CodeGen/SystemZ/loop-01.ll | 6 +- llvm/test/CodeGen/SystemZ/memchr-02.ll | 4 +- llvm/test/CodeGen/SystemZ/memcpy-02.ll | 64 +- llvm/test/CodeGen/SystemZ/or-01.ll | 40 +- llvm/test/CodeGen/SystemZ/or-03.ll | 34 +- llvm/test/CodeGen/SystemZ/or-05.ll | 26 +- llvm/test/CodeGen/SystemZ/or-06.ll | 16 +- llvm/test/CodeGen/SystemZ/or-08.ll | 16 +- llvm/test/CodeGen/SystemZ/serialize-01.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-01.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-02.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-03.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-04.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-05.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-06.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-07.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-08.ll | 2 +- llvm/test/CodeGen/SystemZ/spill-01.ll | 212 ++--- llvm/test/CodeGen/SystemZ/strcpy-01.ll | 2 +- llvm/test/CodeGen/SystemZ/tls-05.ll | 2 +- llvm/test/CodeGen/SystemZ/tls-06.ll | 4 +- llvm/test/CodeGen/SystemZ/tls-07.ll | 4 +- llvm/test/CodeGen/SystemZ/unaligned-01.ll | 10 +- llvm/test/CodeGen/SystemZ/xor-01.ll | 40 +- llvm/test/CodeGen/SystemZ/xor-03.ll | 34 +- llvm/test/CodeGen/SystemZ/xor-05.ll | 26 +- llvm/test/CodeGen/SystemZ/xor-06.ll | 16 +- llvm/test/CodeGen/SystemZ/xor-08.ll | 16 +- .../CodeGen/Thumb/2007-01-31-RegInfoAssert.ll | 2 +- .../Thumb/2007-05-05-InvalidPushPop.ll | 10 +- .../CodeGen/Thumb/2009-07-20-TwoAddrBug.ll | 2 +- .../Thumb/2009-08-12-ConstIslandAssert.ll | 310 +++---- .../CodeGen/Thumb/2009-08-12-RegInfoAssert.ll | 6 +- llvm/test/CodeGen/Thumb/2009-08-20-ISelBug.ll | 8 +- .../Thumb/2009-12-17-pre-regalloc-taildup.ll | 12 +- .../CodeGen/Thumb/2011-05-11-DAGLegalizer.ll | 10 +- llvm/test/CodeGen/Thumb/2011-EpilogueBug.ll | 2 +- .../Thumb/2014-06-10-thumb1-ldst-opt-bug.ll | 4 +- llvm/test/CodeGen/Thumb/asmprinter-bug.ll | 30 +- .../Thumb/cortex-m0-unaligned-access.ll | 2 +- llvm/test/CodeGen/Thumb/dyn-stackalloc.ll | 4 +- llvm/test/CodeGen/Thumb/large-stack.ll | 2 +- llvm/test/CodeGen/Thumb/ldm-merge-call.ll | 4 +- llvm/test/CodeGen/Thumb/ldm-merge-struct.ll | 4 +- .../Thumb/ldm-stm-base-materialization.ll | 4 +- llvm/test/CodeGen/Thumb/ldr_ext.ll | 10 +- llvm/test/CodeGen/Thumb/ldr_frame.ll 
| 8 +- llvm/test/CodeGen/Thumb/long.ll | 2 +- llvm/test/CodeGen/Thumb/segmented-stacks.ll | 2 +- llvm/test/CodeGen/Thumb/stack-access.ll | 12 +- llvm/test/CodeGen/Thumb/stm-merge.ll | 2 +- llvm/test/CodeGen/Thumb/thumb-ldm.ll | 16 +- llvm/test/CodeGen/Thumb/vargs.ll | 4 +- .../Thumb2/2009-07-17-CrossRegClassCopy.ll | 2 +- .../test/CodeGen/Thumb2/2009-07-21-ISelBug.ll | 10 +- .../CodeGen/Thumb2/2009-07-30-PEICrash.ll | 34 +- .../CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll | 2 +- .../CodeGen/Thumb2/2009-08-02-CoalescerBug.ll | 4 +- .../CodeGen/Thumb2/2009-08-04-CoalescerBug.ll | 12 +- .../Thumb2/2009-08-04-ScavengerAssert.ll | 70 +- .../Thumb2/2009-08-04-SubregLoweringBug3.ll | 2 +- .../CodeGen/Thumb2/2009-08-07-NeonFPBug.ll | 6 +- .../test/CodeGen/Thumb2/2009-08-10-ISelBug.ll | 6 +- .../CodeGen/Thumb2/2009-09-01-PostRAProlog.ll | 22 +- .../CodeGen/Thumb2/2009-09-28-ITBlockBug.ll | 14 +- .../Thumb2/2009-11-11-ScavengerAssert.ll | 10 +- .../CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll | 40 +- .../Thumb2/2010-01-06-TailDuplicateLabels.ll | 12 +- .../CodeGen/Thumb2/2010-03-08-addi12-ccout.ll | 8 +- .../CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll | 2 +- .../CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll | 2 +- .../CodeGen/Thumb2/2010-06-21-TailMergeBug.ll | 4 +- .../Thumb2/2010-08-10-VarSizedAllocaBug.ll | 2 +- .../Thumb2/2011-06-07-TwoAddrEarlyClobber.ll | 4 +- .../Thumb2/2011-12-16-T2SizeReduceAssert.ll | 8 +- .../test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll | 12 +- .../2013-02-19-tail-call-register-hint.ll | 6 +- llvm/test/CodeGen/Thumb2/aligned-constants.ll | 4 +- llvm/test/CodeGen/Thumb2/aligned-spill.ll | 2 +- llvm/test/CodeGen/Thumb2/bfi.ll | 2 +- .../constant-islands-new-island-padding.ll | 8 +- llvm/test/CodeGen/Thumb2/constant-islands.ll | 254 +++--- llvm/test/CodeGen/Thumb2/crash.ll | 16 +- .../CodeGen/Thumb2/cross-rc-coalescing-2.ll | 10 +- llvm/test/CodeGen/Thumb2/float-ops.ll | 4 +- llvm/test/CodeGen/Thumb2/frameless2.ll | 2 +- llvm/test/CodeGen/Thumb2/ifcvt-neon.ll | 4 +- llvm/test/CodeGen/Thumb2/inflate-regs.ll | 4 +- llvm/test/CodeGen/Thumb2/large-call.ll | 2 +- llvm/test/CodeGen/Thumb2/large-stack.ll | 2 +- llvm/test/CodeGen/Thumb2/lsr-deficiency.ll | 6 +- llvm/test/CodeGen/Thumb2/machine-licm.ll | 4 +- llvm/test/CodeGen/Thumb2/tail-call-r9.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-call.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll | 6 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ldm.ll | 16 +- llvm/test/CodeGen/Thumb2/thumb2-ldr.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll | 8 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll | 6 +- llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-smul.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-str_post.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-tbh.ll | 2 +- llvm/test/CodeGen/Thumb2/tls1.ll | 2 +- llvm/test/CodeGen/Thumb2/tls2.ll | 2 +- llvm/test/CodeGen/Thumb2/tpsoft.ll | 2 +- llvm/test/CodeGen/Thumb2/v8_IT_2.ll | 6 +- llvm/test/CodeGen/Thumb2/v8_IT_3.ll | 8 +- .../test/CodeGen/X86/2005-01-17-CycleInDAG.ll | 4 +- .../CodeGen/X86/2006-01-19-ISelFoldingBug.ll | 2 +- 
.../CodeGen/X86/2006-04-27-ISelFoldingBug.ll | 6 +- .../X86/2006-05-01-SchedCausingSpills.ll | 8 +- .../CodeGen/X86/2006-05-02-InstrSched1.ll | 8 +- .../CodeGen/X86/2006-05-02-InstrSched2.ll | 4 +- .../X86/2006-05-08-CoalesceSubRegClass.ll | 4 +- .../test/CodeGen/X86/2006-05-08-InstrSched.ll | 8 +- .../test/CodeGen/X86/2006-05-11-InstrSched.ll | 8 +- .../test/CodeGen/X86/2006-05-25-CycleInDAG.ll | 2 +- llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll | 4 +- .../test/CodeGen/X86/2006-08-07-CycleInDAG.ll | 4 +- .../test/CodeGen/X86/2006-08-16-CycleInDAG.ll | 2 +- .../test/CodeGen/X86/2006-09-01-CycleInDAG.ll | 10 +- .../test/CodeGen/X86/2006-10-09-CycleInDAG.ll | 4 +- .../X86/2006-10-10-FindModifiedNodeSlotBug.ll | 6 +- .../test/CodeGen/X86/2006-10-12-CycleInDAG.ll | 4 +- .../test/CodeGen/X86/2006-10-13-CycleInDAG.ll | 4 +- llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll | 20 +- .../CodeGen/X86/2006-11-17-IllegalMove.ll | 6 +- .../CodeGen/X86/2006-12-16-InlineAsmCrash.ll | 2 +- .../CodeGen/X86/2007-01-13-StackPtrIndex.ll | 60 +- .../test/CodeGen/X86/2007-02-04-OrAddrMode.ll | 2 +- .../test/CodeGen/X86/2007-02-16-BranchFold.ll | 8 +- .../X86/2007-02-19-LiveIntervalAssert.ll | 4 +- .../CodeGen/X86/2007-03-01-SpillerCrash.ll | 2 +- .../CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll | 20 +- llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll | 8 +- .../CodeGen/X86/2007-03-26-CoalescerBug.ll | 2 +- .../X86/2007-04-17-LiveIntervalAssert.ll | 2 +- .../CodeGen/X86/2007-05-05-VecCastExpand.ll | 4 +- .../X86/2007-06-29-VecFPConstantCSEBug.ll | 2 +- .../CodeGen/X86/2007-07-10-StackerAssert.ll | 4 +- .../CodeGen/X86/2007-07-18-Vector-Extract.ll | 4 +- .../X86/2007-08-09-IllegalX86-64Asm.ll | 68 +- .../test/CodeGen/X86/2007-09-05-InvalidAsm.ll | 10 +- .../CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll | 2 +- .../X86/2007-10-12-CoalesceExtSubReg.ll | 8 +- .../CodeGen/X86/2007-10-12-SpillerUnfold1.ll | 8 +- .../CodeGen/X86/2007-10-12-SpillerUnfold2.ll | 8 +- .../CodeGen/X86/2007-10-14-CoalescerCrash.ll | 4 +- .../CodeGen/X86/2007-10-19-SpillerUnfold.ll | 8 +- .../CodeGen/X86/2007-10-29-ExtendSetCC.ll | 2 +- .../X86/2007-10-31-extractelement-i64.ll | 24 +- .../X86/2007-11-04-LiveIntervalCrash.ll | 2 +- .../test/CodeGen/X86/2007-11-06-InstrSched.ll | 4 +- llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll | 4 +- .../CodeGen/X86/2007-12-16-BURRSchedCrash.ll | 4 +- .../test/CodeGen/X86/2007-12-18-LoadCSEBug.ll | 4 +- .../CodeGen/X86/2008-01-08-SchedulerCrash.ll | 2 +- .../X86/2008-01-16-FPStackifierAssert.ll | 10 +- .../X86/2008-01-16-InvalidDAGCombineXform.ll | 22 +- llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll | 2 +- .../CodeGen/X86/2008-02-06-LoadFoldingBug.ll | 4 +- .../CodeGen/X86/2008-02-18-TailMergingBug.ll | 24 +- .../X86/2008-02-20-InlineAsmClobber.ll | 2 +- .../X86/2008-02-22-LocalRegAllocBug.ll | 28 +- .../X86/2008-02-25-X86-64-CoalescerBug.ll | 8 +- .../CodeGen/X86/2008-02-27-DeadSlotElimBug.ll | 6 +- llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll | 20 +- .../CodeGen/X86/2008-03-10-RegAllocInfLoop.ll | 2 +- .../X86/2008-03-12-ThreadLocalAlias.ll | 8 +- .../CodeGen/X86/2008-03-14-SpillerCrash.ll | 4 +- .../X86/2008-03-23-DarwinAsmComments.ll | 10 +- .../X86/2008-03-31-SpillerFoldingBug.ll | 2 +- .../CodeGen/X86/2008-04-09-BranchFolding.ll | 2 +- .../CodeGen/X86/2008-04-15-LiveVariableBug.ll | 4 +- .../CodeGen/X86/2008-04-16-CoalescerBug.ll | 2 +- .../CodeGen/X86/2008-04-17-CoalescerBug.ll | 4 +- .../X86/2008-04-24-pblendw-fold-crash.ll | 2 +- .../CodeGen/X86/2008-04-28-CoalescerBug.ll | 2 +- .../X86/2008-05-09-ShuffleLoweringBug.ll | 
2 +- .../CodeGen/X86/2008-05-12-tailmerge-5.ll | 20 +- .../CodeGen/X86/2008-05-21-CoalescerBug.ll | 2 +- .../X86/2008-05-22-FoldUnalignedLoad.ll | 2 +- .../X86/2008-06-13-NotVolatileLoadStore.ll | 4 +- .../X86/2008-06-13-VolatileLoadStore.ll | 4 +- .../test/CodeGen/X86/2008-06-16-SubregsBug.ll | 2 +- .../X86/2008-07-07-DanglingDeadInsts.ll | 2 +- .../CodeGen/X86/2008-07-19-movups-spills.ll | 128 +-- .../CodeGen/X86/2008-07-22-CombinerCrash.ll | 2 +- .../CodeGen/X86/2008-08-06-RewriterBug.ll | 10 +- .../CodeGen/X86/2008-08-31-EH_RETURN64.ll | 2 +- .../CodeGen/X86/2008-09-09-LinearScanBug.ll | 2 +- .../CodeGen/X86/2008-09-11-CoalescerBug.ll | 4 +- .../CodeGen/X86/2008-09-11-CoalescerBug2.ll | 4 +- .../CodeGen/X86/2008-09-17-inline-asm-1.ll | 2 +- .../CodeGen/X86/2008-09-18-inline-asm-2.ll | 6 +- .../CodeGen/X86/2008-09-19-RegAllocBug.ll | 2 +- llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll | 8 +- .../CodeGen/X86/2008-09-29-VolatileBug.ll | 2 +- .../CodeGen/X86/2008-10-06-x87ld-nan-2.ll | 2 +- .../test/CodeGen/X86/2008-10-07-SSEISelBug.ll | 6 +- llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll | 2 +- .../test/CodeGen/X86/2008-10-16-VecUnaryOp.ll | 2 +- .../CodeGen/X86/2008-10-27-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2008-11-06-testb.ll | 2 +- .../2008-12-01-loop-iv-used-outside-loop.ll | 2 +- .../X86/2008-12-02-IllegalResultType.ll | 2 +- .../CodeGen/X86/2009-01-16-SchedulerBug.ll | 4 +- .../X86/2009-01-18-ConstantExprCrash.ll | 2 +- llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll | 2 +- llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll | 2 +- .../CodeGen/X86/2009-02-03-AnalyzedTwice.ll | 2 +- .../X86/2009-02-11-codegenprepare-reuse.ll | 6 +- .../CodeGen/X86/2009-02-12-DebugInfoVLA.ll | 26 +- .../CodeGen/X86/2009-02-26-MachineLICMBug.ll | 6 +- llvm/test/CodeGen/X86/2009-03-03-BTHang.ll | 4 +- .../CodeGen/X86/2009-03-05-burr-list-crash.ll | 2 +- .../test/CodeGen/X86/2009-03-09-APIntCrash.ll | 2 +- .../CodeGen/X86/2009-03-10-CoalescerBug.ll | 2 +- .../CodeGen/X86/2009-03-23-LinearScanBug.ll | 6 +- .../CodeGen/X86/2009-03-23-MultiUseSched.ll | 48 +- llvm/test/CodeGen/X86/2009-03-25-TestBug.ll | 2 +- .../CodeGen/X86/2009-04-14-IllegalRegs.ll | 6 +- .../CodeGen/X86/2009-04-16-SpillerUnfold.ll | 16 +- llvm/test/CodeGen/X86/2009-04-24.ll | 2 +- .../CodeGen/X86/2009-04-25-CoalescerBug.ll | 2 +- .../CodeGen/X86/2009-04-27-CoalescerAssert.ll | 170 ++-- .../X86/2009-04-29-IndirectDestOperands.ll | 8 +- .../CodeGen/X86/2009-04-29-LinearScanBug.ll | 26 +- .../CodeGen/X86/2009-04-29-RegAllocAssert.ll | 2 +- llvm/test/CodeGen/X86/2009-04-scale.ll | 4 +- .../CodeGen/X86/2009-05-11-tailmerge-crash.ll | 2 +- .../CodeGen/X86/2009-05-28-DAGCombineCrash.ll | 2 +- llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll | 4 +- .../CodeGen/X86/2009-06-02-RewriterBug.ll | 40 +- .../CodeGen/X86/2009-06-04-VirtualLiveIn.ll | 2 +- .../CodeGen/X86/2009-06-05-VZextByteShort.ll | 8 +- .../CodeGen/X86/2009-07-15-CoalescerBug.ll | 2 +- .../CodeGen/X86/2009-07-20-DAGCombineBug.ll | 2 +- .../X86/2009-08-06-branchfolder-crash.ll | 8 +- .../X86/2009-08-14-Win64MemoryIndirectArg.ll | 6 +- .../X86/2009-08-19-LoadNarrowingMiscompile.ll | 2 +- .../CodeGen/X86/2009-08-23-SubRegReuseUndo.ll | 6 +- .../CodeGen/X86/2009-09-10-LoadFoldingBug.ll | 2 +- .../CodeGen/X86/2009-09-10-SpillComments.ll | 28 +- .../CodeGen/X86/2009-09-16-CoalescerBug.ll | 2 +- .../X86/2009-09-21-NoSpillLoopCount.ll | 4 +- .../CodeGen/X86/2009-09-22-CoalescerBug.ll | 2 +- .../CodeGen/X86/2009-10-19-EmergencySpill.ll | 10 +- .../X86/2009-10-19-atomic-cmp-eflags.ll | 4 +- 
.../CodeGen/X86/2009-10-25-RewriterBug.ll | 8 +- .../CodeGen/X86/2009-11-16-MachineLICM.ll | 8 +- llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll | 2 +- .../CodeGen/X86/2009-12-01-EarlyClobberBug.ll | 8 +- .../CodeGen/X86/2009-12-11-TLSNoRedZone.ll | 10 +- llvm/test/CodeGen/X86/20090313-signext.ll | 2 +- llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll | 10 +- .../X86/2010-01-15-SelectionDAGCycle.ll | 4 +- llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll | 6 +- llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll | 2 +- .../CodeGen/X86/2010-02-04-SchedulerBug.ll | 8 +- .../CodeGen/X86/2010-02-11-NonTemporal.ll | 4 +- .../X86/2010-02-12-CoalescerBug-Impdef.ll | 2 +- .../X86/2010-02-19-TailCallRetAddrBug.ll | 18 +- .../X86/2010-02-23-RematImplicitSubreg.ll | 4 +- llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll | 2 +- .../X86/2010-04-06-SSEDomainFixCrash.ll | 2 +- .../CodeGen/X86/2010-04-08-CoalescerBug.ll | 2 +- .../X86/2010-04-13-AnalyzeBranchCrash.ll | 2 +- .../X86/2010-04-30-LocalAlloc-LandingPad.ll | 12 +- .../X86/2010-05-05-LocalAllocEarlyClobber.ll | 4 +- llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll | 4 +- .../CodeGen/X86/2010-05-10-DAGCombinerBug.ll | 2 +- .../CodeGen/X86/2010-05-16-nosseconversion.ll | 2 +- .../CodeGen/X86/2010-05-26-DotDebugLoc.ll | 2 +- .../CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll | 2 +- .../X86/2010-06-14-fast-isel-fs-load.ll | 2 +- .../X86/2010-06-15-FastAllocEarlyCLobber.ll | 6 +- .../X86/2010-06-25-CoalescerSubRegDefDead.ll | 2 +- .../CodeGen/X86/2010-06-25-asm-RA-crash.ll | 4 +- .../X86/2010-06-28-matched-g-constraint.ll | 2 +- llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll | 2 +- .../CodeGen/X86/2010-07-11-FPStackLoneUse.ll | 2 +- .../X86/2010-08-04-MaskedSignedCompare.ll | 4 +- .../CodeGen/X86/2010-08-04-StackVariable.ll | 8 +- .../2010-09-01-RemoveCopyByCommutingDef.ll | 2 +- .../X86/2010-09-17-SideEffectsInChain.ll | 4 +- llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll | 14 +- .../CodeGen/X86/2010-11-18-SelectOfExtload.ll | 4 +- llvm/test/CodeGen/X86/2011-02-12-shuffle.ll | 2 +- .../CodeGen/X86/2011-03-02-DAGCombiner.ll | 14 +- .../X86/2011-03-09-Physreg-Coalescing.ll | 2 +- .../CodeGen/X86/2011-04-13-SchedCmpJmp.ll | 6 +- llvm/test/CodeGen/X86/2011-05-09-loaduse.ll | 2 +- .../X86/2011-05-26-UnreachableBlockElim.ll | 2 +- .../X86/2011-05-27-CrossClassCoalescing.ll | 4 +- llvm/test/CodeGen/X86/2011-06-01-fildll.ll | 2 +- llvm/test/CodeGen/X86/2011-06-03-x87chain.ll | 6 +- .../CodeGen/X86/2011-06-12-FastAllocSpill.ll | 6 +- .../2011-07-13-BadFrameIndexDisplacement.ll | 2 +- .../CodeGen/X86/2011-09-14-valcoalesce.ll | 2 +- llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll | 16 +- llvm/test/CodeGen/X86/2011-10-11-srl.ll | 2 +- .../test/CodeGen/X86/2011-10-12-MachineCSE.ll | 32 +- .../X86/2011-10-18-FastISel-VectorParams.ll | 10 +- .../CodeGen/X86/2011-10-19-LegelizeLoad.ll | 4 +- .../CodeGen/X86/2011-10-19-widen_vselect.ll | 4 +- llvm/test/CodeGen/X86/2011-10-27-tstore.ll | 2 +- .../CodeGen/X86/2011-11-22-AVX2-Domains.ll | 18 +- .../CodeGen/X86/2011-12-08-AVXISelBugs.ll | 8 +- ...011-12-26-extractelement-duplicate-load.ll | 2 +- .../X86/2012-01-10-UndefExceptionEdge.ll | 2 +- llvm/test/CodeGen/X86/2012-01-11-split-cv.ll | 2 +- .../test/CodeGen/X86/2012-01-12-extract-sv.ll | 2 +- .../X86/2012-01-16-mfence-nosse-flags.ll | 2 +- llvm/test/CodeGen/X86/2012-02-12-dagco.ll | 4 +- .../CodeGen/X86/2012-02-29-CoalescerBug.ll | 4 +- .../CodeGen/X86/2012-03-26-PostRALICMBug.ll | 8 +- llvm/test/CodeGen/X86/2012-04-26-sdglue.ll | 4 +- llvm/test/CodeGen/X86/2012-07-10-extload64.ll | 4 +- 
.../CodeGen/X86/2012-07-15-broadcastfold.ll | 2 +- .../CodeGen/X86/2012-08-17-legalizer-crash.ll | 4 +- llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll | 6 +- llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll | 8 +- llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll | 4 +- .../CodeGen/X86/2012-10-18-crash-dagco.ll | 14 +- .../X86/2012-11-28-merge-store-alias.ll | 4 +- .../CodeGen/X86/2012-11-30-handlemove-dbg.ll | 2 +- .../CodeGen/X86/2012-11-30-misched-dbg.ll | 2 +- .../X86/2012-12-06-python27-miscompile.ll | 2 +- .../CodeGen/X86/2012-12-19-NoImplicitFloat.ll | 2 +- .../CodeGen/X86/2013-03-13-VEX-DestReg.ll | 2 +- .../X86/2013-10-14-FastISel-incorrect-vreg.ll | 6 +- llvm/test/CodeGen/X86/Atomics-64.ll | 200 ++--- llvm/test/CodeGen/X86/GC/alloc_loop.ll | 4 +- llvm/test/CodeGen/X86/GC/argpromotion.ll | 2 +- llvm/test/CodeGen/X86/GC/inline.ll | 2 +- llvm/test/CodeGen/X86/GC/inline2.ll | 2 +- llvm/test/CodeGen/X86/MachineBranchProb.ll | 2 +- llvm/test/CodeGen/X86/MachineSink-DbgValue.ll | 2 +- llvm/test/CodeGen/X86/MachineSink-eflags.ll | 12 +- .../CodeGen/X86/MergeConsecutiveStores.ll | 50 +- llvm/test/CodeGen/X86/StackColoring.ll | 2 +- llvm/test/CodeGen/X86/SwitchLowering.ll | 2 +- llvm/test/CodeGen/X86/SwizzleShuff.ll | 20 +- llvm/test/CodeGen/X86/abi-isel.ll | 182 ++--- llvm/test/CodeGen/X86/addr-mode-matcher.ll | 4 +- .../address-type-promotion-constantexpr.ll | 2 +- llvm/test/CodeGen/X86/aliases.ll | 6 +- llvm/test/CodeGen/X86/aligned-variadic.ll | 2 +- llvm/test/CodeGen/X86/and-su.ll | 2 +- .../atom-call-reg-indirect-foldedreload32.ll | 28 +- .../atom-call-reg-indirect-foldedreload64.ll | 42 +- .../CodeGen/X86/atom-call-reg-indirect.ll | 8 +- llvm/test/CodeGen/X86/atom-cmpb.ll | 4 +- llvm/test/CodeGen/X86/atom-fixup-lea1.ll | 2 +- llvm/test/CodeGen/X86/atom-fixup-lea2.ll | 12 +- llvm/test/CodeGen/X86/atom-fixup-lea3.ll | 6 +- llvm/test/CodeGen/X86/atom-fixup-lea4.ll | 2 +- llvm/test/CodeGen/X86/atom-lea-addw-bug.ll | 6 +- llvm/test/CodeGen/X86/atom-sched.ll | 8 +- llvm/test/CodeGen/X86/atomic-dagsched.ll | 18 +- .../CodeGen/X86/atomic-load-store-wide.ll | 2 +- llvm/test/CodeGen/X86/atomic-load-store.ll | 2 +- llvm/test/CodeGen/X86/atomic-or.ll | 4 +- llvm/test/CodeGen/X86/atomic-pointer.ll | 2 +- llvm/test/CodeGen/X86/atomic128.ll | 4 +- llvm/test/CodeGen/X86/atomic_mi.ll | 60 +- llvm/test/CodeGen/X86/atomic_op.ll | 2 +- llvm/test/CodeGen/X86/avoid-loop-align-2.ll | 4 +- llvm/test/CodeGen/X86/avoid-loop-align.ll | 2 +- llvm/test/CodeGen/X86/avoid_complex_am.ll | 4 +- llvm/test/CodeGen/X86/avx-arith.ll | 6 +- llvm/test/CodeGen/X86/avx-basic.ll | 4 +- llvm/test/CodeGen/X86/avx-bitcast.ll | 2 +- llvm/test/CodeGen/X86/avx-cvt.ll | 10 +- llvm/test/CodeGen/X86/avx-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/avx-intrinsics-x86.ll | 14 +- llvm/test/CodeGen/X86/avx-load-store.ll | 16 +- llvm/test/CodeGen/X86/avx-logic.ll | 4 +- llvm/test/CodeGen/X86/avx-splat.ll | 2 +- llvm/test/CodeGen/X86/avx-unpack.ll | 16 +- llvm/test/CodeGen/X86/avx-varargs-x86_64.ll | 2 +- llvm/test/CodeGen/X86/avx-vbroadcast.ll | 30 +- llvm/test/CodeGen/X86/avx-vinsertf128.ll | 4 +- llvm/test/CodeGen/X86/avx-vperm2x128.ll | 4 +- llvm/test/CodeGen/X86/avx-vzeroupper.ll | 4 +- llvm/test/CodeGen/X86/avx.ll | 12 +- .../CodeGen/X86/avx1-logical-load-folding.ll | 8 +- llvm/test/CodeGen/X86/avx2-conversions.ll | 10 +- .../CodeGen/X86/avx2-pmovxrm-intrinsics.ll | 24 +- llvm/test/CodeGen/X86/avx2-shift.ll | 20 +- llvm/test/CodeGen/X86/avx2-vbroadcast.ll | 50 +- llvm/test/CodeGen/X86/avx512-arith.ll | 28 +- 
llvm/test/CodeGen/X86/avx512-build-vector.ll | 2 +- llvm/test/CodeGen/X86/avx512-cvt.ll | 12 +- .../X86/avx512-gather-scatter-intrin.ll | 8 +- llvm/test/CodeGen/X86/avx512-i1test.ll | 2 +- .../test/CodeGen/X86/avx512-insert-extract.ll | 10 +- llvm/test/CodeGen/X86/avx512-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/avx512-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/avx512-logic.ll | 4 +- llvm/test/CodeGen/X86/avx512-mask-op.ll | 6 +- llvm/test/CodeGen/X86/avx512-mov.ll | 60 +- llvm/test/CodeGen/X86/avx512-round.ll | 2 +- llvm/test/CodeGen/X86/avx512-shift.ll | 8 +- llvm/test/CodeGen/X86/avx512-vbroadcast.ll | 12 +- llvm/test/CodeGen/X86/avx512-vec-cmp.ll | 20 +- llvm/test/CodeGen/X86/avx512bw-arith.ll | 8 +- llvm/test/CodeGen/X86/avx512bw-mask-op.ll | 4 +- llvm/test/CodeGen/X86/avx512bw-mov.ll | 12 +- llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll | 12 +- llvm/test/CodeGen/X86/avx512bwvl-arith.ll | 16 +- .../test/CodeGen/X86/avx512bwvl-intrinsics.ll | 28 +- llvm/test/CodeGen/X86/avx512bwvl-mov.ll | 24 +- llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll | 24 +- llvm/test/CodeGen/X86/avx512dq-mask-op.ll | 2 +- llvm/test/CodeGen/X86/avx512er-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/avx512vl-arith.ll | 40 +- llvm/test/CodeGen/X86/avx512vl-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/avx512vl-mov.ll | 96 +-- llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll | 40 +- llvm/test/CodeGen/X86/bitcast-mmx.ll | 4 +- llvm/test/CodeGen/X86/block-placement.ll | 186 ++--- llvm/test/CodeGen/X86/bmi.ll | 22 +- .../CodeGen/X86/break-anti-dependencies.ll | 4 +- llvm/test/CodeGen/X86/break-false-dep.ll | 24 +- llvm/test/CodeGen/X86/bswap.ll | 6 +- llvm/test/CodeGen/X86/byval-align.ll | 8 +- llvm/test/CodeGen/X86/byval.ll | 2 +- llvm/test/CodeGen/X86/call-push.ll | 2 +- llvm/test/CodeGen/X86/cas.ll | 24 +- llvm/test/CodeGen/X86/chain_order.ll | 8 +- .../CodeGen/X86/change-compare-stride-1.ll | 18 +- llvm/test/CodeGen/X86/clobber-fi0.ll | 6 +- llvm/test/CodeGen/X86/cmov-into-branch.ll | 8 +- llvm/test/CodeGen/X86/cmov.ll | 16 +- llvm/test/CodeGen/X86/cmp.ll | 8 +- .../test/CodeGen/X86/cmpxchg-clobber-flags.ll | 2 +- llvm/test/CodeGen/X86/cmpxchg-i1.ll | 2 +- llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll | 2 +- llvm/test/CodeGen/X86/coalesce-esp.ll | 2 +- llvm/test/CodeGen/X86/coalesce-implicitdef.ll | 12 +- llvm/test/CodeGen/X86/coalescer-commute1.ll | 4 +- llvm/test/CodeGen/X86/coalescer-commute4.ll | 4 +- llvm/test/CodeGen/X86/coalescer-cross.ll | 4 +- llvm/test/CodeGen/X86/coalescer-dce2.ll | 16 +- llvm/test/CodeGen/X86/coalescer-identity.ll | 6 +- llvm/test/CodeGen/X86/code_placement.ll | 38 +- .../X86/codegen-prepare-addrmode-sext.ll | 72 +- llvm/test/CodeGen/X86/codegen-prepare-cast.ll | 4 +- .../CodeGen/X86/codegen-prepare-extload.ll | 44 +- llvm/test/CodeGen/X86/codegen-prepare.ll | 4 +- llvm/test/CodeGen/X86/codemodel.ll | 12 +- llvm/test/CodeGen/X86/combiner-aa-0.ll | 6 +- llvm/test/CodeGen/X86/combiner-aa-1.ll | 4 +- llvm/test/CodeGen/X86/commute-blend-avx2.ll | 16 +- llvm/test/CodeGen/X86/commute-blend-sse41.ll | 6 +- llvm/test/CodeGen/X86/commute-clmul.ll | 8 +- llvm/test/CodeGen/X86/commute-fcmp.ll | 48 +- llvm/test/CodeGen/X86/commute-intrinsic.ll | 2 +- llvm/test/CodeGen/X86/commute-xop.ll | 40 +- llvm/test/CodeGen/X86/compact-unwind.ll | 10 +- llvm/test/CodeGen/X86/complex-asm.ll | 4 +- .../test/CodeGen/X86/computeKnownBits_urem.ll | 2 +- llvm/test/CodeGen/X86/const-base-addr.ll | 6 +- llvm/test/CodeGen/X86/constant-combines.ll | 2 +- .../CodeGen/X86/constant-hoisting-optnone.ll | 4 +- 
.../X86/constant-hoisting-shift-immediate.ll | 4 +- .../X86/convert-2-addr-3-addr-inc64.ll | 2 +- llvm/test/CodeGen/X86/cppeh-catch-all.ll | 4 +- llvm/test/CodeGen/X86/cppeh-catch-scalar.ll | 18 +- llvm/test/CodeGen/X86/cppeh-frame-vars.ll | 62 +- llvm/test/CodeGen/X86/crash-O0.ll | 2 +- llvm/test/CodeGen/X86/crash-nosse.ll | 2 +- llvm/test/CodeGen/X86/crash.ll | 44 +- .../CodeGen/X86/critical-anti-dep-breaker.ll | 4 +- .../test/CodeGen/X86/cse-add-with-overflow.ll | 4 +- llvm/test/CodeGen/X86/cvt16.ll | 4 +- .../CodeGen/X86/dagcombine-buildvector.ll | 2 +- llvm/test/CodeGen/X86/dagcombine-cse.ll | 4 +- llvm/test/CodeGen/X86/darwin-quote.ll | 2 +- llvm/test/CodeGen/X86/dbg-changes-codegen.ll | 6 +- llvm/test/CodeGen/X86/dbg-combine.ll | 8 +- llvm/test/CodeGen/X86/discontiguous-loops.ll | 2 +- llvm/test/CodeGen/X86/div8.ll | 6 +- llvm/test/CodeGen/X86/dllimport-x86_64.ll | 6 +- llvm/test/CodeGen/X86/dllimport.ll | 6 +- llvm/test/CodeGen/X86/dollar-name.ll | 4 +- .../X86/dont-trunc-store-double-to-float.ll | 2 +- llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll | 22 +- llvm/test/CodeGen/X86/early-ifcvt.ll | 2 +- llvm/test/CodeGen/X86/emit-big-cst.ll | 2 +- llvm/test/CodeGen/X86/expand-opaque-const.ll | 6 +- llvm/test/CodeGen/X86/extend.ll | 4 +- llvm/test/CodeGen/X86/extract-extract.ll | 4 +- llvm/test/CodeGen/X86/extractelement-load.ll | 8 +- llvm/test/CodeGen/X86/extractps.ll | 4 +- llvm/test/CodeGen/X86/f16c-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-args-fail.ll | 2 +- .../fast-isel-avoid-unnecessary-pic-base.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-call-bool.ll | 2 +- llvm/test/CodeGen/X86/fast-isel-fold-mem.ll | 2 +- .../CodeGen/X86/fast-isel-fptrunc-fpext.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-gep.ll | 18 +- llvm/test/CodeGen/X86/fast-isel-gv.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-i1.ll | 2 +- .../X86/fast-isel-int-float-conversion.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-mem.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-tailcall.ll | 2 +- llvm/test/CodeGen/X86/fast-isel-tls.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-x86-64.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-x86.ll | 4 +- llvm/test/CodeGen/X86/fast-isel.ll | 16 +- llvm/test/CodeGen/X86/fastcc-byval.ll | 2 +- llvm/test/CodeGen/X86/fastcc-sret.ll | 2 +- llvm/test/CodeGen/X86/fastcc.ll | 8 +- .../X86/fastisel-gep-promote-before-add.ll | 10 +- llvm/test/CodeGen/X86/fma-do-not-commute.ll | 4 +- .../X86/fma4-intrinsics-x86_64-folded-load.ll | 24 +- llvm/test/CodeGen/X86/fma_patterns.ll | 4 +- llvm/test/CodeGen/X86/fmul-zero.ll | 2 +- llvm/test/CodeGen/X86/fold-add.ll | 4 +- llvm/test/CodeGen/X86/fold-and-shift.ll | 12 +- llvm/test/CodeGen/X86/fold-call-2.ll | 2 +- llvm/test/CodeGen/X86/fold-call-3.ll | 10 +- llvm/test/CodeGen/X86/fold-call-oper.ll | 6 +- llvm/test/CodeGen/X86/fold-call.ll | 2 +- llvm/test/CodeGen/X86/fold-load-unops.ll | 8 +- llvm/test/CodeGen/X86/fold-load-vec.ll | 18 +- llvm/test/CodeGen/X86/fold-load.ll | 8 +- llvm/test/CodeGen/X86/fold-mul-lohi.ll | 2 +- llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll | 4 +- llvm/test/CodeGen/X86/fold-sext-trunc.ll | 4 +- llvm/test/CodeGen/X86/fold-tied-op.ll | 12 +- llvm/test/CodeGen/X86/fold-vex.ll | 2 +- llvm/test/CodeGen/X86/fold-zext-trunc.ll | 4 +- .../CodeGen/X86/force-align-stack-alloca.ll | 2 +- llvm/test/CodeGen/X86/fp-double-rounding.ll | 2 +- llvm/test/CodeGen/X86/fp-load-trunc.ll | 8 +- llvm/test/CodeGen/X86/fp-stack-O0-crash.ll | 8 +- .../test/CodeGen/X86/fp-stack-compare-cmov.ll | 2 +- llvm/test/CodeGen/X86/fp-stack-compare.ll | 2 +- 
llvm/test/CodeGen/X86/fp-stack-ret.ll | 2 +- llvm/test/CodeGen/X86/fp-stack.ll | 6 +- llvm/test/CodeGen/X86/fp2sint.ll | 4 +- llvm/test/CodeGen/X86/fp_load_cast_fold.ll | 6 +- llvm/test/CodeGen/X86/fp_load_fold.ll | 12 +- llvm/test/CodeGen/X86/frameallocate.ll | 2 +- llvm/test/CodeGen/X86/full-lsr.ll | 8 +- llvm/test/CodeGen/X86/gather-addresses.ll | 16 +- llvm/test/CodeGen/X86/ghc-cc.ll | 8 +- llvm/test/CodeGen/X86/ghc-cc64.ll | 32 +- llvm/test/CodeGen/X86/gs-fold.ll | 4 +- .../CodeGen/X86/h-register-addressing-32.ll | 14 +- .../CodeGen/X86/h-register-addressing-64.ll | 14 +- llvm/test/CodeGen/X86/half.ll | 8 +- llvm/test/CodeGen/X86/hidden-vis-2.ll | 2 +- llvm/test/CodeGen/X86/hidden-vis-3.ll | 4 +- llvm/test/CodeGen/X86/hidden-vis-4.ll | 2 +- llvm/test/CodeGen/X86/hidden-vis-pic.ll | 2 +- llvm/test/CodeGen/X86/hipe-cc.ll | 12 +- llvm/test/CodeGen/X86/hipe-cc64.ll | 14 +- llvm/test/CodeGen/X86/hoist-invariant-load.ll | 2 +- llvm/test/CodeGen/X86/i128-mul.ll | 2 +- llvm/test/CodeGen/X86/i128-ret.ll | 2 +- llvm/test/CodeGen/X86/i1narrowfail.ll | 2 +- llvm/test/CodeGen/X86/i256-add.ll | 8 +- llvm/test/CodeGen/X86/i2k.ll | 4 +- llvm/test/CodeGen/X86/i486-fence-loop.ll | 4 +- llvm/test/CodeGen/X86/i64-mem-copy.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-fpstack.ll | 6 +- llvm/test/CodeGen/X86/inline-asm-out-regs.ll | 4 +- llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll | 6 +- .../CodeGen/X86/inline-asm-stack-realign.ll | 2 +- .../CodeGen/X86/inline-asm-stack-realign2.ll | 2 +- .../CodeGen/X86/inline-asm-stack-realign3.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-tied.ll | 6 +- llvm/test/CodeGen/X86/ins_split_regalloc.ll | 2 +- .../test/CodeGen/X86/ins_subreg_coalesce-1.ll | 2 +- .../test/CodeGen/X86/ins_subreg_coalesce-3.ll | 16 +- llvm/test/CodeGen/X86/insertps-O0-bug.ll | 4 +- .../CodeGen/X86/invalid-shift-immediate.ll | 2 +- llvm/test/CodeGen/X86/isel-optnone.ll | 12 +- llvm/test/CodeGen/X86/isel-sink.ll | 2 +- llvm/test/CodeGen/X86/isel-sink2.ll | 4 +- llvm/test/CodeGen/X86/isel-sink3.ll | 4 +- llvm/test/CodeGen/X86/jump_sign.ll | 8 +- llvm/test/CodeGen/X86/large-constants.ll | 16 +- llvm/test/CodeGen/X86/ldzero.ll | 12 +- llvm/test/CodeGen/X86/lea-5.ll | 4 +- llvm/test/CodeGen/X86/lea-recursion.ll | 16 +- llvm/test/CodeGen/X86/legalize-shift-64.ll | 2 +- llvm/test/CodeGen/X86/licm-nested.ll | 4 +- .../CodeGen/X86/liveness-local-regalloc.ll | 2 +- llvm/test/CodeGen/X86/load-slice.ll | 10 +- llvm/test/CodeGen/X86/longlong-deadload.ll | 2 +- .../test/CodeGen/X86/loop-strength-reduce4.ll | 16 +- .../test/CodeGen/X86/loop-strength-reduce7.ll | 2 +- .../test/CodeGen/X86/loop-strength-reduce8.ll | 8 +- llvm/test/CodeGen/X86/lsr-delayed-fold.ll | 4 +- llvm/test/CodeGen/X86/lsr-i386.ll | 2 +- llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll | 40 +- llvm/test/CodeGen/X86/lsr-normalization.ll | 10 +- .../CodeGen/X86/lsr-redundant-addressing.ll | 8 +- llvm/test/CodeGen/X86/lsr-reuse-trunc.ll | 6 +- llvm/test/CodeGen/X86/lsr-reuse.ll | 86 +- llvm/test/CodeGen/X86/lsr-static-addr.ll | 2 +- llvm/test/CodeGen/X86/lsr-wrap.ll | 2 +- llvm/test/CodeGen/X86/lzcnt-tzcnt.ll | 36 +- llvm/test/CodeGen/X86/machine-cse.ll | 2 +- llvm/test/CodeGen/X86/masked-iv-safe.ll | 48 +- llvm/test/CodeGen/X86/masked-iv-unsafe.ll | 78 +- llvm/test/CodeGen/X86/mcinst-lowering.ll | 2 +- llvm/test/CodeGen/X86/mem-intrin-base-reg.ll | 12 +- llvm/test/CodeGen/X86/mem-promote-integers.ll | 70 +- llvm/test/CodeGen/X86/misaligned-memset.ll | 2 +- llvm/test/CodeGen/X86/misched-aa-colored.ll | 4 +- llvm/test/CodeGen/X86/misched-aa-mmos.ll | 4 +- 
llvm/test/CodeGen/X86/misched-balance.ll | 90 +- .../X86/misched-code-difference-with-debug.ll | 8 +- llvm/test/CodeGen/X86/misched-crash.ll | 4 +- llvm/test/CodeGen/X86/misched-fusion.ll | 14 +- llvm/test/CodeGen/X86/misched-matmul.ll | 64 +- llvm/test/CodeGen/X86/misched-matrix.ll | 64 +- llvm/test/CodeGen/X86/misched-new.ll | 6 +- .../CodeGen/X86/mmx-arg-passing-x86-64.ll | 2 +- llvm/test/CodeGen/X86/mmx-arith.ll | 66 +- llvm/test/CodeGen/X86/mmx-bitcast.ll | 8 +- llvm/test/CodeGen/X86/mmx-copy-gprs.ll | 2 +- llvm/test/CodeGen/X86/mmx-fold-load.ll | 50 +- llvm/test/CodeGen/X86/movbe.ll | 6 +- llvm/test/CodeGen/X86/movfs.ll | 4 +- llvm/test/CodeGen/X86/movgs.ll | 16 +- llvm/test/CodeGen/X86/movmsk.ll | 4 +- llvm/test/CodeGen/X86/movtopush.ll | 6 +- llvm/test/CodeGen/X86/ms-inline-asm.ll | 6 +- llvm/test/CodeGen/X86/mul128_sext_loop.ll | 2 +- llvm/test/CodeGen/X86/muloti.ll | 10 +- .../test/CodeGen/X86/mult-alt-generic-i686.ll | 38 +- .../CodeGen/X86/mult-alt-generic-x86_64.ll | 38 +- llvm/test/CodeGen/X86/mult-alt-x86.ll | 48 +- .../CodeGen/X86/multiple-loop-post-inc.ll | 28 +- llvm/test/CodeGen/X86/mulx32.ll | 2 +- llvm/test/CodeGen/X86/mulx64.ll | 2 +- llvm/test/CodeGen/X86/musttail-indirect.ll | 30 +- llvm/test/CodeGen/X86/musttail-varargs.ll | 6 +- llvm/test/CodeGen/X86/nancvt.ll | 54 +- llvm/test/CodeGen/X86/narrow-shl-load.ll | 6 +- llvm/test/CodeGen/X86/narrow_op-1.ll | 4 +- llvm/test/CodeGen/X86/negate-add-zero.ll | 16 +- llvm/test/CodeGen/X86/no-cmov.ll | 2 +- llvm/test/CodeGen/X86/norex-subreg.ll | 6 +- llvm/test/CodeGen/X86/nosse-error1.ll | 8 +- llvm/test/CodeGen/X86/nosse-error2.ll | 8 +- llvm/test/CodeGen/X86/nosse-varargs.ll | 8 +- llvm/test/CodeGen/X86/object-size.ll | 16 +- llvm/test/CodeGen/X86/opt-ext-uses.ll | 2 +- llvm/test/CodeGen/X86/optimize-max-0.ll | 12 +- llvm/test/CodeGen/X86/optimize-max-2.ll | 2 +- llvm/test/CodeGen/X86/optimize-max-3.ll | 2 +- llvm/test/CodeGen/X86/packed_struct.ll | 10 +- llvm/test/CodeGen/X86/palignr-2.ll | 4 +- llvm/test/CodeGen/X86/patchpoint.ll | 6 +- llvm/test/CodeGen/X86/peep-test-0.ll | 2 +- llvm/test/CodeGen/X86/peep-test-1.ll | 2 +- llvm/test/CodeGen/X86/peephole-fold-movsd.ll | 4 +- .../CodeGen/X86/peephole-multiple-folds.ll | 4 +- llvm/test/CodeGen/X86/phi-bit-propagation.ll | 4 +- llvm/test/CodeGen/X86/phielim-split.ll | 2 +- .../CodeGen/X86/phys-reg-local-regalloc.ll | 6 +- .../CodeGen/X86/phys_subreg_coalesce-3.ll | 2 +- llvm/test/CodeGen/X86/pic.ll | 8 +- llvm/test/CodeGen/X86/pic_jumptable.ll | 2 +- llvm/test/CodeGen/X86/pmovext.ll | 2 +- llvm/test/CodeGen/X86/pmovsx-inreg.ll | 24 +- llvm/test/CodeGen/X86/pmulld.ll | 2 +- llvm/test/CodeGen/X86/pointer-vector.ll | 24 +- llvm/test/CodeGen/X86/postra-licm.ll | 8 +- llvm/test/CodeGen/X86/pr10475.ll | 2 +- llvm/test/CodeGen/X86/pr10525.ll | 2 +- llvm/test/CodeGen/X86/pr11334.ll | 2 +- llvm/test/CodeGen/X86/pr12360.ll | 4 +- llvm/test/CodeGen/X86/pr12889.ll | 2 +- llvm/test/CodeGen/X86/pr13209.ll | 30 +- llvm/test/CodeGen/X86/pr13859.ll | 2 +- llvm/test/CodeGen/X86/pr13899.ll | 20 +- llvm/test/CodeGen/X86/pr14161.ll | 4 +- llvm/test/CodeGen/X86/pr14562.ll | 2 +- llvm/test/CodeGen/X86/pr1505b.ll | 4 +- llvm/test/CodeGen/X86/pr15267.ll | 8 +- llvm/test/CodeGen/X86/pr15309.ll | 2 +- llvm/test/CodeGen/X86/pr18023.ll | 8 +- llvm/test/CodeGen/X86/pr18162.ll | 6 +- llvm/test/CodeGen/X86/pr18846.ll | 24 +- llvm/test/CodeGen/X86/pr20020.ll | 8 +- llvm/test/CodeGen/X86/pr2177.ll | 4 +- llvm/test/CodeGen/X86/pr2182.ll | 8 +- llvm/test/CodeGen/X86/pr2326.ll | 8 +- 
llvm/test/CodeGen/X86/pr2656.ll | 4 +- llvm/test/CodeGen/X86/pr2849.ll | 8 +- llvm/test/CodeGen/X86/pr2924.ll | 8 +- llvm/test/CodeGen/X86/pr2982.ll | 6 +- llvm/test/CodeGen/X86/pr3216.ll | 2 +- llvm/test/CodeGen/X86/pr3241.ll | 2 +- llvm/test/CodeGen/X86/pr3244.ll | 4 +- llvm/test/CodeGen/X86/pr3317.ll | 8 +- llvm/test/CodeGen/X86/pr3366.ll | 2 +- llvm/test/CodeGen/X86/pr9127.ll | 2 +- llvm/test/CodeGen/X86/pre-ra-sched.ll | 14 +- llvm/test/CodeGen/X86/private-2.ll | 2 +- llvm/test/CodeGen/X86/private.ll | 2 +- llvm/test/CodeGen/X86/promote-assert-zext.ll | 2 +- llvm/test/CodeGen/X86/promote-trunc.ll | 4 +- llvm/test/CodeGen/X86/promote.ll | 4 +- llvm/test/CodeGen/X86/pshufb-mask-comments.ll | 4 +- llvm/test/CodeGen/X86/psubus.ll | 24 +- llvm/test/CodeGen/X86/ragreedy-bug.ll | 50 +- llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll | 4 +- .../X86/ragreedy-last-chance-recoloring.ll | 34 +- llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll | 30 +- .../X86/regalloc-reconcile-broken-hints.ll | 22 +- llvm/test/CodeGen/X86/regpressure.ll | 60 +- llvm/test/CodeGen/X86/remat-constant.ll | 2 +- llvm/test/CodeGen/X86/remat-fold-load.ll | 20 +- .../CodeGen/X86/remat-invalid-liveness.ll | 6 +- llvm/test/CodeGen/X86/remat-scalar-zero.ll | 34 +- llvm/test/CodeGen/X86/reverse_branches.ll | 2 +- llvm/test/CodeGen/X86/rip-rel-address.ll | 2 +- llvm/test/CodeGen/X86/rot32.ll | 4 +- llvm/test/CodeGen/X86/rot64.ll | 4 +- llvm/test/CodeGen/X86/rotate4.ll | 8 +- llvm/test/CodeGen/X86/sandybridge-loads.ll | 10 +- llvm/test/CodeGen/X86/scalar-extract.ll | 2 +- llvm/test/CodeGen/X86/scalar_widen_div.ll | 20 +- llvm/test/CodeGen/X86/scalarize-bitcast.ll | 2 +- llvm/test/CodeGen/X86/scev-interchange.ll | 2 +- llvm/test/CodeGen/X86/segmented-stacks.ll | 2 +- llvm/test/CodeGen/X86/seh-safe-div.ll | 14 +- llvm/test/CodeGen/X86/select-with-and-or.ll | 2 +- llvm/test/CodeGen/X86/select.ll | 10 +- llvm/test/CodeGen/X86/setcc-narrowing.ll | 2 +- llvm/test/CodeGen/X86/sext-load.ll | 2 +- llvm/test/CodeGen/X86/sha.ll | 14 +- llvm/test/CodeGen/X86/shift-and.ll | 4 +- llvm/test/CodeGen/X86/shift-bmi2.ll | 16 +- llvm/test/CodeGen/X86/shift-coalesce.ll | 2 +- llvm/test/CodeGen/X86/shift-codegen.ll | 4 +- llvm/test/CodeGen/X86/shift-combine.ll | 2 +- llvm/test/CodeGen/X86/shift-folding.ll | 4 +- llvm/test/CodeGen/X86/shift-one.ll | 2 +- llvm/test/CodeGen/X86/shift-parts.ll | 2 +- llvm/test/CodeGen/X86/shl-i64.ll | 4 +- llvm/test/CodeGen/X86/shl_undef.ll | 4 +- llvm/test/CodeGen/X86/shrink-compare.ll | 4 +- .../test/CodeGen/X86/shuffle-combine-crash.ll | 2 +- llvm/test/CodeGen/X86/sibcall-4.ll | 2 +- llvm/test/CodeGen/X86/sibcall-5.ll | 2 +- llvm/test/CodeGen/X86/sibcall.ll | 4 +- llvm/test/CodeGen/X86/simple-zext.ll | 2 +- llvm/test/CodeGen/X86/sink-hoist.ll | 10 +- llvm/test/CodeGen/X86/slow-incdec.ll | 4 +- llvm/test/CodeGen/X86/split-vector-bitcast.ll | 2 +- llvm/test/CodeGen/X86/sse-align-0.ll | 4 +- llvm/test/CodeGen/X86/sse-align-1.ll | 4 +- llvm/test/CodeGen/X86/sse-align-10.ll | 2 +- llvm/test/CodeGen/X86/sse-align-12.ll | 8 +- llvm/test/CodeGen/X86/sse-align-2.ll | 4 +- llvm/test/CodeGen/X86/sse-align-5.ll | 2 +- llvm/test/CodeGen/X86/sse-align-6.ll | 2 +- llvm/test/CodeGen/X86/sse-align-9.ll | 4 +- llvm/test/CodeGen/X86/sse-domains.ll | 2 +- llvm/test/CodeGen/X86/sse-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/sse-load-ret.ll | 2 +- .../CodeGen/X86/sse-unaligned-mem-feature.ll | 2 +- llvm/test/CodeGen/X86/sse2.ll | 38 +- llvm/test/CodeGen/X86/sse3-avx-addsub.ll | 8 +- llvm/test/CodeGen/X86/sse3.ll | 18 +- 
.../CodeGen/X86/sse41-pmovxrm-intrinsics.ll | 24 +- llvm/test/CodeGen/X86/sse41.ll | 30 +- llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll | 12 +- llvm/test/CodeGen/X86/ssp-data-layout.ll | 38 +- llvm/test/CodeGen/X86/stack-align.ll | 4 +- .../CodeGen/X86/stack-protector-dbginfo.ll | 2 +- .../X86/stack-protector-vreg-to-vreg-copy.ll | 2 +- .../CodeGen/X86/stack-protector-weight.ll | 2 +- llvm/test/CodeGen/X86/stack-protector.ll | 132 +-- llvm/test/CodeGen/X86/stackmap.ll | 2 +- llvm/test/CodeGen/X86/statepoint-forward.ll | 12 +- llvm/test/CodeGen/X86/store-narrow.ll | 22 +- llvm/test/CodeGen/X86/store_op_load_fold.ll | 4 +- llvm/test/CodeGen/X86/store_op_load_fold2.ll | 4 +- .../CodeGen/X86/stride-nine-with-base-reg.ll | 2 +- llvm/test/CodeGen/X86/stride-reuse.ll | 2 +- llvm/test/CodeGen/X86/subreg-to-reg-0.ll | 2 +- llvm/test/CodeGen/X86/subreg-to-reg-2.ll | 6 +- llvm/test/CodeGen/X86/subreg-to-reg-4.ll | 16 +- llvm/test/CodeGen/X86/subreg-to-reg-6.ll | 2 +- llvm/test/CodeGen/X86/switch-bt.ll | 4 +- llvm/test/CodeGen/X86/switch-zextload.ll | 2 +- llvm/test/CodeGen/X86/tail-call-win64.ll | 2 +- llvm/test/CodeGen/X86/tail-dup-addr.ll | 2 +- llvm/test/CodeGen/X86/tail-opts.ll | 24 +- llvm/test/CodeGen/X86/tailcall-64.ll | 4 +- .../CodeGen/X86/tailcall-returndup-void.ll | 4 +- llvm/test/CodeGen/X86/tailcall-ri64.ll | 4 +- llvm/test/CodeGen/X86/tailcallbyval.ll | 2 +- llvm/test/CodeGen/X86/tailcallbyval64.ll | 2 +- .../test/CodeGen/X86/tbm-intrinsics-x86_64.ll | 4 +- llvm/test/CodeGen/X86/tbm_patterns.ll | 4 +- llvm/test/CodeGen/X86/test-shrink-bug.ll | 2 +- llvm/test/CodeGen/X86/testl-commute.ll | 12 +- .../CodeGen/X86/tls-addr-non-leaf-function.ll | 2 +- llvm/test/CodeGen/X86/tls-local-dynamic.ll | 4 +- llvm/test/CodeGen/X86/tls-pic.ll | 8 +- llvm/test/CodeGen/X86/tls-pie.ll | 4 +- llvm/test/CodeGen/X86/tls.ll | 18 +- llvm/test/CodeGen/X86/tlv-1.ll | 4 +- llvm/test/CodeGen/X86/trunc-ext-ld-st.ll | 12 +- llvm/test/CodeGen/X86/trunc-to-bool.ll | 2 +- llvm/test/CodeGen/X86/twoaddr-pass-sink.ll | 6 +- .../CodeGen/X86/unaligned-32-byte-memops.ll | 38 +- .../CodeGen/X86/unaligned-spill-folding.ll | 2 +- llvm/test/CodeGen/X86/unwindraise.ll | 40 +- llvm/test/CodeGen/X86/use-add-flags.ll | 2 +- llvm/test/CodeGen/X86/v4i32load-crash.ll | 8 +- llvm/test/CodeGen/X86/v8i1-masks.ll | 10 +- llvm/test/CodeGen/X86/vaargs.ll | 4 +- llvm/test/CodeGen/X86/vararg_tailcall.ll | 28 +- .../CodeGen/X86/vec-loadsingles-alignment.ll | 16 +- llvm/test/CodeGen/X86/vec-trunc-store.ll | 4 +- llvm/test/CodeGen/X86/vec_align.ll | 8 +- llvm/test/CodeGen/X86/vec_anyext.ll | 24 +- llvm/test/CodeGen/X86/vec_extract-mmx.ll | 6 +- llvm/test/CodeGen/X86/vec_extract-sse4.ll | 8 +- llvm/test/CodeGen/X86/vec_extract.ll | 6 +- llvm/test/CodeGen/X86/vec_fpext.ll | 6 +- llvm/test/CodeGen/X86/vec_i64.ll | 4 +- llvm/test/CodeGen/X86/vec_ins_extract.ll | 12 +- llvm/test/CodeGen/X86/vec_insert-5.ll | 6 +- llvm/test/CodeGen/X86/vec_insert-mmx.ll | 4 +- llvm/test/CodeGen/X86/vec_loadsingles.ll | 52 +- llvm/test/CodeGen/X86/vec_logical.ll | 2 +- llvm/test/CodeGen/X86/vec_set-7.ll | 2 +- llvm/test/CodeGen/X86/vec_set-F.ll | 2 +- llvm/test/CodeGen/X86/vec_setcc-2.ll | 4 +- llvm/test/CodeGen/X86/vec_ss_load_fold.ll | 4 +- llvm/test/CodeGen/X86/vec_trunc_sext.ll | 2 +- llvm/test/CodeGen/X86/vec_zero.ll | 4 +- llvm/test/CodeGen/X86/vector-gep.ll | 4 +- llvm/test/CodeGen/X86/vector-intrinsics.ll | 8 +- llvm/test/CodeGen/X86/vector-sext.ll | 18 +- .../test/CodeGen/X86/vector-shuffle-128-v2.ll | 16 +- .../test/CodeGen/X86/vector-shuffle-128-v4.ll | 14 
+- .../test/CodeGen/X86/vector-shuffle-256-v4.ll | 10 +- .../test/CodeGen/X86/vector-shuffle-256-v8.ll | 14 +- .../CodeGen/X86/vector-shuffle-combining.ll | 20 +- llvm/test/CodeGen/X86/vector-shuffle-mmx.ll | 4 +- llvm/test/CodeGen/X86/vector-shuffle-sse1.ll | 8 +- llvm/test/CodeGen/X86/vector-variable-idx2.ll | 8 +- llvm/test/CodeGen/X86/vector-zext.ll | 6 +- llvm/test/CodeGen/X86/vector-zmov.ll | 4 +- llvm/test/CodeGen/X86/vector.ll | 42 +- llvm/test/CodeGen/X86/viabs.ll | 2 +- llvm/test/CodeGen/X86/visibility2.ll | 2 +- llvm/test/CodeGen/X86/volatile.ll | 6 +- llvm/test/CodeGen/X86/vselect-avx.ll | 2 +- llvm/test/CodeGen/X86/vselect-minmax.ll | 768 +++++++++--------- llvm/test/CodeGen/X86/vshift-5.ll | 4 +- llvm/test/CodeGen/X86/vshift-6.ll | 2 +- .../CodeGen/X86/weak_def_can_be_hidden.ll | 4 +- llvm/test/CodeGen/X86/widen_arith-1.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-2.ll | 24 +- llvm/test/CodeGen/X86/widen_arith-3.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-4.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-5.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-6.ll | 18 +- llvm/test/CodeGen/X86/widen_cast-1.ll | 14 +- llvm/test/CodeGen/X86/widen_cast-2.ll | 14 +- llvm/test/CodeGen/X86/widen_cast-4.ll | 24 +- llvm/test/CodeGen/X86/widen_conversions.ll | 2 +- llvm/test/CodeGen/X86/widen_load-0.ll | 4 +- llvm/test/CodeGen/X86/widen_load-1.ll | 4 +- llvm/test/CodeGen/X86/widen_load-2.ll | 44 +- llvm/test/CodeGen/X86/win32_sret.ll | 4 +- llvm/test/CodeGen/X86/win64_eh.ll | 16 +- llvm/test/CodeGen/X86/win_eh_prepare.ll | 2 +- .../CodeGen/X86/x32-function_pointer-1.ll | 4 +- llvm/test/CodeGen/X86/x86-64-gv-offset.ll | 4 +- llvm/test/CodeGen/X86/x86-64-jumps.ll | 4 +- llvm/test/CodeGen/X86/x86-64-mem.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-4.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-5.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-6.ll | 2 +- .../test/CodeGen/X86/x86-64-ptr-arg-simple.ll | 2 +- llvm/test/CodeGen/X86/x86-64-sret-return.ll | 18 +- .../CodeGen/X86/x86-64-static-relo-movl.ll | 2 +- .../X86/x86-mixed-alignment-dagcombine.ll | 8 +- .../test/CodeGen/X86/xop-intrinsics-x86_64.ll | 40 +- llvm/test/CodeGen/X86/zext-extract_subreg.ll | 2 +- llvm/test/CodeGen/X86/zext-sext.ll | 10 +- llvm/test/CodeGen/X86/zlib-longest-match.ll | 74 +- llvm/test/CodeGen/XCore/2009-01-08-Crash.ll | 2 +- .../CodeGen/XCore/2010-02-25-LSR-Crash.ll | 2 +- .../CodeGen/XCore/2011-01-31-DAGCombineBug.ll | 2 +- llvm/test/CodeGen/XCore/atomic.ll | 18 +- llvm/test/CodeGen/XCore/codemodel.ll | 18 +- llvm/test/CodeGen/XCore/dwarf_debug.ll | 2 +- llvm/test/CodeGen/XCore/exception.ll | 2 +- llvm/test/CodeGen/XCore/indirectbr.ll | 4 +- llvm/test/CodeGen/XCore/llvm-intrinsics.ll | 2 +- llvm/test/CodeGen/XCore/load.ll | 10 +- llvm/test/CodeGen/XCore/private.ll | 2 +- llvm/test/CodeGen/XCore/scavenging.ll | 26 +- llvm/test/CodeGen/XCore/trampoline.ll | 2 +- llvm/test/CodeGen/XCore/unaligned_load.ll | 6 +- .../CodeGen/XCore/unaligned_store_combine.ll | 2 +- llvm/test/CodeGen/XCore/zextfree.ll | 2 +- llvm/test/DebugInfo/2010-01-05-DbgScope.ll | 2 +- llvm/test/DebugInfo/2010-03-24-MemberFn.ll | 6 +- .../DebugInfo/2010-04-06-NestedFnDbgInfo.ll | 12 +- llvm/test/DebugInfo/2010-04-19-FramePtr.ll | 4 +- llvm/test/DebugInfo/2010-05-03-OriginDIE.ll | 6 +- .../DebugInfo/2010-06-29-InlinedFnLocalVar.ll | 2 +- llvm/test/DebugInfo/AArch64/frameindices.ll | 8 +- .../test/DebugInfo/AArch64/struct_by_value.ll | 2 +- .../test/DebugInfo/ARM/lowerbdgdeclare_vla.ll | 2 +- llvm/test/DebugInfo/COFF/cpp-mangling.ll | 2 +- 
llvm/test/DebugInfo/Inputs/line.ll | 4 +- llvm/test/DebugInfo/PR20038.ll | 14 +- llvm/test/DebugInfo/SystemZ/variable-loc.ll | 2 +- llvm/test/DebugInfo/X86/2010-04-13-PubType.ll | 4 +- .../X86/2011-09-26-GlobalVarContext.ll | 4 +- .../DebugInfo/X86/2011-12-16-BadStructRef.ll | 16 +- llvm/test/DebugInfo/X86/DW_AT_byte_size.ll | 4 +- llvm/test/DebugInfo/X86/DW_AT_linkage_name.ll | 4 +- .../DebugInfo/X86/DW_AT_object_pointer.ll | 6 +- llvm/test/DebugInfo/X86/arguments.ll | 2 +- llvm/test/DebugInfo/X86/array.ll | 2 +- llvm/test/DebugInfo/X86/array2.ll | 4 +- llvm/test/DebugInfo/X86/block-capture.ll | 6 +- llvm/test/DebugInfo/X86/byvalstruct.ll | 2 +- llvm/test/DebugInfo/X86/cu-ranges-odr.ll | 4 +- llvm/test/DebugInfo/X86/cu-ranges.ll | 4 +- .../test/DebugInfo/X86/dbg-byval-parameter.ll | 6 +- llvm/test/DebugInfo/X86/dbg-declare-arg.ll | 12 +- llvm/test/DebugInfo/X86/dbg-declare.ll | 6 +- llvm/test/DebugInfo/X86/dbg-prolog-end.ll | 8 +- .../DebugInfo/X86/dbg-value-dag-combine.ll | 2 +- llvm/test/DebugInfo/X86/dbg-value-location.ll | 2 +- llvm/test/DebugInfo/X86/dbg-value-range.ll | 2 +- .../DebugInfo/X86/dbg-value-terminator.ll | 2 +- llvm/test/DebugInfo/X86/dbg_value_direct.ll | 6 +- llvm/test/DebugInfo/X86/debug-info-blocks.ll | 46 +- .../DebugInfo/X86/debug-info-static-member.ll | 2 +- llvm/test/DebugInfo/X86/debug-loc-asan.ll | 10 +- llvm/test/DebugInfo/X86/debug-loc-offset.ll | 8 +- .../test/DebugInfo/X86/debug-ranges-offset.ll | 20 +- .../test/DebugInfo/X86/decl-derived-member.ll | 6 +- llvm/test/DebugInfo/X86/discriminator.ll | 6 +- .../X86/dwarf-aranges-no-dwarf-labels.ll | 2 +- llvm/test/DebugInfo/X86/dwarf-aranges.ll | 6 +- llvm/test/DebugInfo/X86/dwarf-public-names.ll | 4 +- llvm/test/DebugInfo/X86/elf-names.ll | 8 +- .../DebugInfo/X86/empty-and-one-elem-array.ll | 4 +- llvm/test/DebugInfo/X86/ending-run.ll | 6 +- llvm/test/DebugInfo/X86/fission-ranges.ll | 2 +- llvm/test/DebugInfo/X86/formal_parameter.ll | 2 +- llvm/test/DebugInfo/X86/generate-odr-hash.ll | 2 +- llvm/test/DebugInfo/X86/gnu-public-names.ll | 12 +- .../DebugInfo/X86/inline-member-function.ll | 6 +- llvm/test/DebugInfo/X86/inline-seldag-test.ll | 4 +- .../DebugInfo/X86/instcombine-instrinsics.ll | 4 +- llvm/test/DebugInfo/X86/lexical_block.ll | 2 +- llvm/test/DebugInfo/X86/line-info.ll | 2 +- llvm/test/DebugInfo/X86/linkage-name.ll | 4 +- llvm/test/DebugInfo/X86/misched-dbg-value.ll | 4 +- .../DebugInfo/X86/nodebug_with_debug_loc.ll | 6 +- llvm/test/DebugInfo/X86/op_deref.ll | 16 +- llvm/test/DebugInfo/X86/parameters.ll | 2 +- llvm/test/DebugInfo/X86/pieces-2.ll | 2 +- llvm/test/DebugInfo/X86/pr11300.ll | 4 +- llvm/test/DebugInfo/X86/pr12831.ll | 10 +- llvm/test/DebugInfo/X86/pr19307.ll | 6 +- llvm/test/DebugInfo/X86/recursive_inlining.ll | 18 +- llvm/test/DebugInfo/X86/reference-argument.ll | 2 +- llvm/test/DebugInfo/X86/rvalue-ref.ll | 4 +- llvm/test/DebugInfo/X86/sret.ll | 42 +- llvm/test/DebugInfo/X86/sroasplit-1.ll | 2 +- llvm/test/DebugInfo/X86/sroasplit-2.ll | 2 +- llvm/test/DebugInfo/X86/sroasplit-3.ll | 2 +- llvm/test/DebugInfo/X86/sroasplit-4.ll | 10 +- llvm/test/DebugInfo/X86/sroasplit-5.ll | 4 +- .../X86/stmt-list-multiple-compile-units.ll | 4 +- llvm/test/DebugInfo/X86/subregisters.ll | 2 +- llvm/test/DebugInfo/X86/vla.ll | 10 +- llvm/test/DebugInfo/block-asan.ll | 4 +- llvm/test/DebugInfo/cross-cu-inlining.ll | 6 +- .../DebugInfo/cross-cu-linkonce-distinct.ll | 2 +- llvm/test/DebugInfo/cross-cu-linkonce.ll | 2 +- llvm/test/DebugInfo/cu-range-hole.ll | 6 +- llvm/test/DebugInfo/cu-ranges.ll | 4 +- 
.../DebugInfo/debug-info-always-inline.ll | 4 +- llvm/test/DebugInfo/dwarf-public-names.ll | 4 +- .../DebugInfo/incorrect-variable-debugloc.ll | 10 +- .../DebugInfo/incorrect-variable-debugloc1.ll | 2 +- llvm/test/DebugInfo/inheritance.ll | 14 +- .../DebugInfo/inline-debug-info-multiret.ll | 22 +- llvm/test/DebugInfo/inline-debug-info.ll | 20 +- llvm/test/DebugInfo/inline-scopes.ll | 8 +- llvm/test/DebugInfo/member-order.ll | 2 +- .../DebugInfo/missing-abstract-variable.ll | 2 +- llvm/test/DebugInfo/namespace.ll | 14 +- .../namespace_inline_function_definition.ll | 6 +- llvm/test/DebugInfo/tu-composite.ll | 2 +- llvm/test/DebugInfo/unconditional-branch.ll | 2 +- .../MCJIT/2002-12-16-ArgTest.ll | 4 +- .../MCJIT/2003-05-06-LivenessClobber.ll | 2 +- .../MCJIT/2003-05-07-ArgumentTest.ll | 2 +- .../MCJIT/2003-08-21-EnvironmentTest.ll | 2 +- .../MCJIT/2007-12-10-APIntLoadStore.ll | 4 +- .../MCJIT/2008-06-05-APInt-OverAShr.ll | 10 +- .../MCJIT/2013-04-04-RelocAddend.ll | 4 +- llvm/test/ExecutionEngine/MCJIT/pr13727.ll | 22 +- .../MCJIT/remote/stubs-remote.ll | 2 +- .../MCJIT/remote/stubs-sm-pic.ll | 2 +- .../remote/test-common-symbols-remote.ll | 22 +- .../test-fp-no-external-funcs-remote.ll | 2 +- .../remote/test-global-init-nonzero-remote.ll | 8 +- .../remote/test-global-init-nonzero-sm-pic.ll | 8 +- .../MCJIT/remote/test-ptr-reloc-remote.ll | 4 +- .../MCJIT/remote/test-ptr-reloc-sm-pic.ll | 4 +- .../ExecutionEngine/MCJIT/stubs-sm-pic.ll | 2 +- llvm/test/ExecutionEngine/MCJIT/stubs.ll | 2 +- .../MCJIT/test-common-symbols-alignment.ll | 4 +- .../MCJIT/test-common-symbols.ll | 22 +- .../MCJIT/test-fp-no-external-funcs.ll | 2 +- llvm/test/ExecutionEngine/MCJIT/test-fp.ll | 2 +- .../MCJIT/test-global-ctors.ll | 2 +- .../MCJIT/test-global-init-nonzero-sm-pic.ll | 8 +- .../MCJIT/test-global-init-nonzero.ll | 8 +- .../test/ExecutionEngine/MCJIT/test-global.ll | 8 +- .../ExecutionEngine/MCJIT/test-loadstore.ll | 10 +- llvm/test/ExecutionEngine/MCJIT/test-local.ll | 8 +- .../MCJIT/test-ptr-reloc-sm-pic.ll | 4 +- .../ExecutionEngine/MCJIT/test-ptr-reloc.ll | 4 +- .../OrcJIT/2002-12-16-ArgTest.ll | 4 +- .../OrcJIT/2003-05-06-LivenessClobber.ll | 2 +- .../OrcJIT/2003-05-07-ArgumentTest.ll | 2 +- .../OrcJIT/2003-08-21-EnvironmentTest.ll | 2 +- .../OrcJIT/2007-12-10-APIntLoadStore.ll | 4 +- .../OrcJIT/2008-06-05-APInt-OverAShr.ll | 10 +- .../OrcJIT/2013-04-04-RelocAddend.ll | 4 +- llvm/test/ExecutionEngine/OrcJIT/pr13727.ll | 22 +- .../OrcJIT/remote/stubs-remote.ll | 2 +- .../OrcJIT/remote/stubs-sm-pic.ll | 2 +- .../remote/test-common-symbols-remote.ll | 22 +- .../test-fp-no-external-funcs-remote.ll | 2 +- .../remote/test-global-init-nonzero-remote.ll | 8 +- .../remote/test-global-init-nonzero-sm-pic.ll | 8 +- .../OrcJIT/remote/test-ptr-reloc-remote.ll | 4 +- .../OrcJIT/remote/test-ptr-reloc-sm-pic.ll | 4 +- .../ExecutionEngine/OrcJIT/stubs-sm-pic.ll | 2 +- llvm/test/ExecutionEngine/OrcJIT/stubs.ll | 2 +- .../OrcJIT/test-common-symbols-alignment.ll | 4 +- .../OrcJIT/test-common-symbols.ll | 22 +- .../OrcJIT/test-fp-no-external-funcs.ll | 2 +- llvm/test/ExecutionEngine/OrcJIT/test-fp.ll | 2 +- .../OrcJIT/test-global-ctors.ll | 2 +- .../OrcJIT/test-global-init-nonzero-sm-pic.ll | 8 +- .../OrcJIT/test-global-init-nonzero.ll | 8 +- .../ExecutionEngine/OrcJIT/test-global.ll | 8 +- .../ExecutionEngine/OrcJIT/test-loadstore.ll | 10 +- .../test/ExecutionEngine/OrcJIT/test-local.ll | 8 +- .../OrcJIT/test-ptr-reloc-sm-pic.ll | 4 +- .../ExecutionEngine/OrcJIT/test-ptr-reloc.ll | 4 +- llvm/test/ExecutionEngine/frem.ll | 
2 +- .../test-interp-vec-loadstore.ll | 30 +- llvm/test/Feature/aliases.ll | 6 +- llvm/test/Feature/md_on_instruction.ll | 2 +- llvm/test/Feature/memorymarkers.ll | 2 +- llvm/test/Feature/optnone-llc.ll | 2 +- llvm/test/Feature/optnone-opt.ll | 2 +- llvm/test/Feature/packed.ll | 4 +- llvm/test/Feature/packed_struct.ll | 10 +- llvm/test/Feature/ppcld.ll | 6 +- llvm/test/Feature/recursivetype.ll | 10 +- llvm/test/Feature/sparcld.ll | 6 +- llvm/test/Feature/testalloca.ll | 2 +- llvm/test/Feature/varargs_new.ll | 2 +- llvm/test/Feature/weak_constant.ll | 8 +- llvm/test/Feature/x86ld.ll | 6 +- .../AddressSanitizer/X86/bug_11395.ll | 4 +- .../AddressSanitizer/asan-vs-gvn.ll | 8 +- .../Instrumentation/AddressSanitizer/basic.ll | 14 +- .../AddressSanitizer/debug_info.ll | 4 +- .../do-not-instrument-promotable-allocas.ll | 4 +- .../AddressSanitizer/freebsd.ll | 2 +- .../AddressSanitizer/global_metadata.ll | 2 +- .../instrument-dynamic-allocas.ll | 2 +- .../AddressSanitizer/instrument_global.ll | 10 +- .../instrument_load_then_store.ll | 2 +- .../instrumentation-with-call-threshold.ll | 8 +- .../AddressSanitizer/stack-poisoning.ll | 2 +- .../AddressSanitizer/stack_dynamic_alloca.ll | 2 +- .../AddressSanitizer/test64.ll | 2 +- .../Instrumentation/AddressSanitizer/ubsan.ll | 6 +- .../BoundsChecking/many-trap.ll | 4 +- .../Instrumentation/BoundsChecking/phi.ll | 12 +- .../BoundsChecking/simple-32.ll | 4 +- .../Instrumentation/BoundsChecking/simple.ll | 24 +- .../DataFlowSanitizer/abilist.ll | 2 +- .../DataFlowSanitizer/debug-nonzero-labels.ll | 4 +- .../Instrumentation/DataFlowSanitizer/load.ll | 52 +- .../DataFlowSanitizer/store.ll | 24 +- .../Instrumentation/InstrProfiling/linkage.ll | 2 +- .../InstrProfiling/noruntime.ll | 2 +- .../MemorySanitizer/array_types.ll | 16 +- .../MemorySanitizer/atomics.ll | 24 +- .../MemorySanitizer/check_access_address.ll | 4 +- .../instrumentation-with-call-threshold.ll | 2 +- .../MemorySanitizer/missing_origin.ll | 2 +- .../MemorySanitizer/msan_basic.ll | 58 +- .../MemorySanitizer/unreachable.ll | 2 +- .../MemorySanitizer/vector_cvt.ll | 2 +- .../SanitizerCoverage/coverage-dbg.ll | 2 +- .../SanitizerCoverage/coverage.ll | 6 +- .../Instrumentation/ThreadSanitizer/atomic.ll | 40 +- .../ThreadSanitizer/capture.ll | 4 +- .../ThreadSanitizer/no_sanitize_thread.ll | 8 +- .../ThreadSanitizer/read_before_write.ll | 4 +- .../ThreadSanitizer/read_from_global.ll | 10 +- .../ThreadSanitizer/tsan-vs-gvn.ll | 4 +- .../ThreadSanitizer/tsan_basic.ll | 4 +- .../ThreadSanitizer/unaligned.ll | 20 +- .../ThreadSanitizer/vptr_read.ll | 2 +- llvm/test/Integer/2007-01-19-TruncSext.ll | 2 +- llvm/test/Integer/BitPacked.ll | 4 +- llvm/test/Integer/packed_bt.ll | 4 +- llvm/test/Integer/packed_struct_bt.ll | 10 +- llvm/test/JitListener/multiple.ll | 12 +- llvm/test/JitListener/simple.ll | 2 +- llvm/test/LTO/X86/cfi_endproc.ll | 2 +- llvm/test/LTO/X86/linkonce_odr_func.ll | 4 +- llvm/test/LTO/X86/set-merged.ll | 6 +- .../test/Linker/2004-05-07-TypeResolution2.ll | 2 +- llvm/test/Linker/2008-03-05-AliasReference.ll | 2 +- llvm/test/Linker/2009-09-03-mdnode.ll | 2 +- llvm/test/Linker/2009-09-03-mdnode2.ll | 2 +- llvm/test/Linker/DbgDeclare.ll | 4 +- llvm/test/Linker/DbgDeclare2.ll | 12 +- llvm/test/Linker/Inputs/linkage.b.ll | 2 +- ...laced-function-matches-first-subprogram.ll | 2 +- llvm/test/Linker/Inputs/testlink.ll | 2 +- llvm/test/Linker/link-global-to-func.ll | 2 +- llvm/test/Linker/partial-type-refinement.ll | 4 +- ...laced-function-matches-first-subprogram.ll | 2 +- 
llvm/test/Linker/testlink.ll | 8 +- llvm/test/Linker/type-unique-odr-b.ll | 2 +- llvm/test/Linker/type-unique-simple2-a.ll | 4 +- llvm/test/Linker/type-unique-simple2-b.ll | 4 +- llvm/test/Linker/type-unique-type-array-a.ll | 6 +- llvm/test/Linker/type-unique-type-array-b.ll | 6 +- llvm/test/MC/AArch64/elf-globaladdress.ll | 8 +- llvm/test/MC/ARM/data-in-code.ll | 2 +- llvm/test/MC/ARM/elf-reloc-03.ll | 4 +- llvm/test/MC/COFF/tricky-names.ll | 6 +- llvm/test/MC/MachO/tlv-bss.ll | 2 +- llvm/test/MC/MachO/x86-data-in-code.ll | 2 +- llvm/test/MC/Mips/elf-bigendian.ll | 2 +- .../Other/2004-08-16-PackedGlobalConstant.ll | 2 +- llvm/test/Other/2004-08-16-PackedSelect.ll | 2 +- llvm/test/Other/2004-08-16-PackedSimple.ll | 2 +- .../Other/2004-08-20-PackedControlFlow.ll | 2 +- llvm/test/Other/2007-09-10-PassManager.ll | 6 +- llvm/test/Other/lint.ll | 10 +- .../test/Other/optimization-remarks-inline.ll | 8 +- .../ADCE/2002-05-23-ZeroArgPHITest.ll | 4 +- llvm/test/Transforms/ADCE/2002-05-28-Crash.ll | 6 +- .../ADCE/2002-07-17-AssertionFailure.ll | 2 +- .../ADCE/2002-07-17-PHIAssertion.ll | 6 +- .../Transforms/ADCE/2003-06-11-InvalidCFG.ll | 2 +- .../ADCE/2003-06-24-BadSuccessor.ll | 4 +- .../ADCE/2003-06-24-BasicFunctionality.ll | 4 +- llvm/test/Transforms/ADCE/basictest1.ll | 14 +- llvm/test/Transforms/ADCE/basictest2.ll | 14 +- .../Transforms/AddDiscriminators/basic.ll | 6 +- .../AddDiscriminators/first-only.ll | 8 +- .../Transforms/AddDiscriminators/multiple.ll | 10 +- .../AddDiscriminators/no-discriminators.ll | 6 +- .../AlignmentFromAssumptions/simple.ll | 32 +- .../AlignmentFromAssumptions/simple32.ll | 32 +- .../AlignmentFromAssumptions/start-unk.ll | 4 +- .../2008-02-01-ReturnAttrs.ll | 2 +- .../2008-07-02-array-indexing.ll | 4 +- .../ArgumentPromotion/aggregate-promote.ll | 4 +- .../Transforms/ArgumentPromotion/attrs.ll | 2 +- .../Transforms/ArgumentPromotion/basictest.ll | 4 +- .../Transforms/ArgumentPromotion/byval-2.ll | 2 +- .../Transforms/ArgumentPromotion/byval.ll | 2 +- .../Transforms/ArgumentPromotion/chained.ll | 4 +- .../ArgumentPromotion/control-flow.ll | 2 +- .../ArgumentPromotion/control-flow2.ll | 4 +- .../Transforms/ArgumentPromotion/crash.ll | 4 +- llvm/test/Transforms/ArgumentPromotion/dbg.ll | 4 +- .../test/Transforms/ArgumentPromotion/fp80.ll | 6 +- .../Transforms/ArgumentPromotion/inalloca.ll | 4 +- .../ArgumentPromotion/reserve-tbaa.ll | 12 +- llvm/test/Transforms/BBVectorize/X86/loop1.ll | 4 +- .../Transforms/BBVectorize/X86/sh-rec2.ll | 16 +- .../Transforms/BBVectorize/X86/sh-rec3.ll | 30 +- .../BBVectorize/X86/simple-ldstr.ll | 12 +- .../Transforms/BBVectorize/X86/wr-aliases.ll | 38 +- .../test/Transforms/BBVectorize/func-alias.ll | 114 +-- llvm/test/Transforms/BBVectorize/ld1.ll | 18 +- llvm/test/Transforms/BBVectorize/loop1.ll | 12 +- .../Transforms/BBVectorize/mem-op-depth.ll | 10 +- llvm/test/Transforms/BBVectorize/metadata.ll | 16 +- .../Transforms/BBVectorize/no-ldstr-conn.ll | 4 +- .../BBVectorize/simple-ldstr-ptrs.ll | 64 +- .../Transforms/BBVectorize/simple-ldstr.ll | 64 +- .../2004-03-14-DominanceProblem.ll | 2 +- .../CodeGenPrepare/X86/extend-sink-hoist.ll | 8 +- .../CodeGenPrepare/X86/sink-addrspacecast.ll | 2 +- .../CodeGenPrepare/statepoint-relocate.ll | 12 +- llvm/test/Transforms/ConstProp/loads.ll | 38 +- .../ConstantHoisting/AArch64/const-addr.ll | 6 +- .../PowerPC/const-base-addr.ll | 6 +- .../ConstantHoisting/PowerPC/masks.ll | 4 +- .../ConstantHoisting/X86/cast-inst.ll | 12 +- .../ConstantHoisting/X86/const-base-addr.ll | 6 +- 
.../CorrelatedValuePropagation/basic.ll | 4 +- .../CorrelatedValuePropagation/non-null.ll | 2 +- .../test/Transforms/DeadArgElim/aggregates.ll | 2 +- .../Transforms/DeadArgElim/deadexternal.ll | 4 +- .../Transforms/DeadArgElim/deadretval2.ll | 2 +- llvm/test/Transforms/DeadArgElim/keepalive.ll | 2 +- .../2011-03-25-DSEMiscompile.ll | 2 +- .../2011-09-06-EndOfFunction.ll | 2 +- .../DeadStoreElimination/2011-09-06-MemCpy.ll | 4 +- .../DeadStoreElimination/PartialStore.ll | 2 +- .../Transforms/DeadStoreElimination/atomic.ll | 16 +- .../DeadStoreElimination/const-pointers.ll | 4 +- .../Transforms/DeadStoreElimination/crash.ll | 4 +- .../Transforms/DeadStoreElimination/free.ll | 2 +- .../Transforms/DeadStoreElimination/simple.ll | 26 +- llvm/test/Transforms/EarlyCSE/basic.ll | 34 +- .../FunctionAttrs/2008-09-03-ReadNone.ll | 2 +- .../FunctionAttrs/2008-09-13-VolatileRead.ll | 2 +- .../FunctionAttrs/2008-12-29-Constant.ll | 2 +- .../FunctionAttrs/2009-01-02-LocalStores.ll | 2 +- .../FunctionAttrs/2010-10-30-volatile.ll | 2 +- llvm/test/Transforms/FunctionAttrs/atomic.ll | 4 +- .../Transforms/FunctionAttrs/nocapture.ll | 8 +- .../FunctionAttrs/optnone-simple.ll | 24 +- .../test/Transforms/GCOVProfiling/linezero.ll | 16 +- .../Transforms/GCOVProfiling/return-block.ll | 2 +- .../GVN/2007-07-25-DominatedLoop.ll | 4 +- .../Transforms/GVN/2007-07-25-InfiniteLoop.ll | 2 +- llvm/test/Transforms/GVN/2007-07-25-Loop.ll | 2 +- .../Transforms/GVN/2007-07-25-NestedLoop.ll | 6 +- .../GVN/2007-07-25-SinglePredecessor.ll | 2 +- .../GVN/2007-07-26-InterlockingLoops.ll | 6 +- .../Transforms/GVN/2007-07-26-NonRedundant.ll | 2 +- .../Transforms/GVN/2007-07-26-PhiErasure.ll | 4 +- .../Transforms/GVN/2007-07-30-PredIDom.ll | 2 +- .../Transforms/GVN/2007-07-31-NoDomInherit.ll | 66 +- .../Transforms/GVN/2007-07-31-RedundantPhi.ll | 2 +- .../Transforms/GVN/2008-02-12-UndefLoad.ll | 2 +- llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll | 4 +- .../Transforms/GVN/2008-07-02-Unreachable.ll | 4 +- .../Transforms/GVN/2008-12-09-SelfRemove.ll | 2 +- .../Transforms/GVN/2008-12-12-RLE-Crash.ll | 4 +- .../GVN/2008-12-14-rle-reanalyze.ll | 2 +- .../Transforms/GVN/2008-12-15-CacheVisited.ll | 4 +- .../GVN/2009-01-21-SortInvalidation.ll | 2 +- .../GVN/2009-01-22-SortInvalidation.ll | 4 +- .../Transforms/GVN/2009-02-17-LoadPRECrash.ll | 18 +- .../Transforms/GVN/2009-06-17-InvalidPRE.ll | 8 +- .../GVN/2009-07-13-MemDepSortFail.ll | 4 +- .../GVN/2009-11-12-MemDepMallocBitCast.ll | 4 +- .../GVN/2010-03-31-RedundantPHIs.ll | 4 +- llvm/test/Transforms/GVN/2010-05-08-OneBit.ll | 2 +- .../Transforms/GVN/2011-04-27-phioperands.ll | 2 +- .../2011-06-01-NonLocalMemdepMiscompile.ll | 14 +- llvm/test/Transforms/GVN/MemdepMiscompile.ll | 8 +- llvm/test/Transforms/GVN/atomic.ll | 56 +- .../Transforms/GVN/calloc-load-removal.ll | 4 +- llvm/test/Transforms/GVN/cond_br.ll | 8 +- llvm/test/Transforms/GVN/cond_br2.ll | 12 +- llvm/test/Transforms/GVN/condprop.ll | 14 +- llvm/test/Transforms/GVN/crash-no-aa.ll | 2 +- llvm/test/Transforms/GVN/crash.ll | 32 +- llvm/test/Transforms/GVN/invariant-load.ll | 18 +- llvm/test/Transforms/GVN/lifetime-simple.ll | 4 +- llvm/test/Transforms/GVN/load-constant-mem.ll | 4 +- .../GVN/load-from-unreachable-predecessor.ll | 6 +- llvm/test/Transforms/GVN/load-pre-align.ll | 4 +- llvm/test/Transforms/GVN/load-pre-licm.ll | 4 +- llvm/test/Transforms/GVN/load-pre-nonlocal.ll | 18 +- llvm/test/Transforms/GVN/lpre-call-wrap-2.ll | 6 +- llvm/test/Transforms/GVN/lpre-call-wrap.ll | 6 +- .../Transforms/GVN/malloc-load-removal.ll | 4 
+- llvm/test/Transforms/GVN/noalias.ll | 18 +- llvm/test/Transforms/GVN/non-local-offset.ll | 8 +- .../test/Transforms/GVN/nonescaping-malloc.ll | 12 +- .../Transforms/GVN/null-aliases-nothing.ll | 4 +- .../GVN/phi-translate-partial-alias.ll | 8 +- llvm/test/Transforms/GVN/phi-translate.ll | 4 +- llvm/test/Transforms/GVN/pr10820.ll | 2 +- llvm/test/Transforms/GVN/pr14166.ll | 6 +- llvm/test/Transforms/GVN/pr17732.ll | 4 +- llvm/test/Transforms/GVN/pr17852.ll | 16 +- llvm/test/Transforms/GVN/pre-basic-add.ll | 2 +- llvm/test/Transforms/GVN/pre-gep-load.ll | 8 +- llvm/test/Transforms/GVN/pre-load.ll | 76 +- llvm/test/Transforms/GVN/pre-single-pred.ll | 6 +- llvm/test/Transforms/GVN/preserve-tbaa.ll | 4 +- llvm/test/Transforms/GVN/range.ll | 48 +- llvm/test/Transforms/GVN/readattrs.ll | 2 +- llvm/test/Transforms/GVN/rle-must-alias.ll | 6 +- .../Transforms/GVN/rle-no-phi-translate.ll | 2 +- llvm/test/Transforms/GVN/rle-nonlocal.ll | 10 +- llvm/test/Transforms/GVN/rle-phi-translate.ll | 18 +- llvm/test/Transforms/GVN/rle-semidominated.ll | 4 +- llvm/test/Transforms/GVN/rle.ll | 110 +-- llvm/test/Transforms/GVN/tbaa.ll | 8 +- llvm/test/Transforms/GVN/volatile.ll | 80 +- .../GlobalDCE/2002-08-17-FunctionDGE.ll | 2 +- .../GlobalDCE/2002-09-12-Redeletion.ll | 2 +- .../GlobalDCE/complex-constantexpr.ll | 8 +- .../GlobalDCE/global_ctors_integration.ll | 8 +- llvm/test/Transforms/GlobalDCE/indirectbr.ll | 2 +- .../GlobalOpt/2004-10-10-CastStoreOnce.ll | 4 +- .../2005-06-15-LocalizeConstExprCrash.ll | 2 +- .../GlobalOpt/2006-07-07-InlineAsmCrash.ll | 4 +- .../2006-11-01-ShrinkGlobalPhiCrash.ll | 2 +- .../Transforms/GlobalOpt/2007-04-05-Crash.ll | 2 +- .../Transforms/GlobalOpt/2007-05-13-Crash.ll | 16 +- .../GlobalOpt/2007-11-09-GEP-GEP-Crash.ll | 2 +- .../Transforms/GlobalOpt/2008-01-03-Crash.ll | 2 +- .../GlobalOpt/2008-01-13-OutOfRangeSROA.ll | 2 +- .../GlobalOpt/2008-01-29-VolatileGlobal.ll | 2 +- .../GlobalOpt/2008-04-26-SROA-Global-Align.ll | 6 +- .../GlobalOpt/2008-07-17-addrspace.ll | 4 +- .../GlobalOpt/2008-12-16-HeapSRACrash-2.ll | 2 +- .../GlobalOpt/2008-12-16-HeapSRACrash.ll | 2 +- .../GlobalOpt/2009-01-13-phi-user.ll | 4 +- .../GlobalOpt/2009-02-15-BitcastAlias.ll | 2 +- .../Transforms/GlobalOpt/2009-03-05-dbg.ll | 2 +- .../GlobalOpt/2009-03-07-PromotePtrToBool.ll | 2 +- .../GlobalOpt/2009-06-01-RecursivePHI.ll | 6 +- .../2009-11-16-BrokenPerformHeapAllocSRoA.ll | 2 +- ...2009-11-16-MallocSingleStoreToGlobalVar.ll | 4 +- .../GlobalOpt/2010-02-25-MallocPromote.ll | 4 +- .../GlobalOpt/2010-02-26-MallocSROA.ll | 4 +- .../Transforms/GlobalOpt/array-elem-refs.ll | 6 +- llvm/test/Transforms/GlobalOpt/atomic.ll | 4 +- llvm/test/Transforms/GlobalOpt/basictest.ll | 2 +- .../GlobalOpt/constantfold-initializers.ll | 6 +- llvm/test/Transforms/GlobalOpt/crash-2.ll | 2 +- llvm/test/Transforms/GlobalOpt/crash.ll | 6 +- .../GlobalOpt/ctor-list-opt-inbounds.ll | 4 +- .../Transforms/GlobalOpt/ctor-list-opt.ll | 10 +- .../test/Transforms/GlobalOpt/deadfunction.ll | 2 +- .../externally-initialized-global-ctr.ll | 6 +- llvm/test/Transforms/GlobalOpt/fastcc.ll | 8 +- .../Transforms/GlobalOpt/globalsra-partial.ll | 2 +- .../GlobalOpt/globalsra-unknown-index.ll | 12 +- llvm/test/Transforms/GlobalOpt/globalsra.ll | 4 +- llvm/test/Transforms/GlobalOpt/heap-sra-1.ll | 4 +- llvm/test/Transforms/GlobalOpt/heap-sra-2.ll | 4 +- llvm/test/Transforms/GlobalOpt/heap-sra-3.ll | 4 +- llvm/test/Transforms/GlobalOpt/heap-sra-4.ll | 4 +- .../test/Transforms/GlobalOpt/heap-sra-phi.ll | 8 +- 
.../test/Transforms/GlobalOpt/integer-bool.ll | 2 +- llvm/test/Transforms/GlobalOpt/iterate.ll | 4 +- .../Transforms/GlobalOpt/load-store-global.ll | 6 +- .../Transforms/GlobalOpt/malloc-promote-1.ll | 6 +- .../Transforms/GlobalOpt/malloc-promote-2.ll | 2 +- .../Transforms/GlobalOpt/malloc-promote-3.ll | 2 +- llvm/test/Transforms/GlobalOpt/memset-null.ll | 2 +- llvm/test/Transforms/GlobalOpt/phi-select.ll | 4 +- .../GlobalOpt/storepointer-compare.ll | 2 +- .../test/Transforms/GlobalOpt/storepointer.ll | 2 +- llvm/test/Transforms/GlobalOpt/tls.ll | 6 +- .../test/Transforms/GlobalOpt/trivialstore.ll | 2 +- llvm/test/Transforms/GlobalOpt/undef-init.ll | 2 +- .../test/Transforms/GlobalOpt/unnamed-addr.ll | 8 +- .../GlobalOpt/zeroinitializer-gep-load.ll | 2 +- .../IPConstantProp/2009-09-24-byval-ptr.ll | 8 +- .../IPConstantProp/dangling-block-address.ll | 4 +- llvm/test/Transforms/IPConstantProp/global.ll | 4 +- .../IPConstantProp/return-argument.ll | 2 +- .../test/Transforms/IRCE/decrementing-loop.ll | 2 +- llvm/test/Transforms/IRCE/low-becount.ll | 2 +- .../IRCE/multiple-access-no-preloop.ll | 4 +- llvm/test/Transforms/IRCE/not-likely-taken.ll | 4 +- .../IRCE/single-access-no-preloop.ll | 4 +- .../IRCE/single-access-with-preloop.ll | 2 +- llvm/test/Transforms/IRCE/unhandled.ll | 2 +- .../test/Transforms/IRCE/with-parent-loops.ll | 18 +- .../2005-02-17-TruncateExprCrash.ll | 2 +- .../2006-06-16-Indvar-LCSSA-Crash.ll | 4 +- .../IndVarSimplify/2007-01-06-TripCount.ll | 2 +- .../IndVarSimplify/2008-09-02-IVType.ll | 6 +- .../2008-10-03-CouldNotCompute.ll | 2 +- .../2009-04-14-shorten_iv_vars.ll | 60 +- .../2009-04-15-shorten-iv-vars-2.ll | 66 +- .../IndVarSimplify/2011-09-27-hoistsext.ll | 2 +- .../IndVarSimplify/2011-11-01-lftrptr.ll | 8 +- .../IndVarSimplify/2011-11-15-multiexit.ll | 2 +- .../2014-06-21-congruent-constant.ll | 12 +- .../IndVarSimplify/ashr-tripcount.ll | 20 +- .../Transforms/IndVarSimplify/avoid-i0.ll | 28 +- .../IndVarSimplify/eliminate-comparison.ll | 18 +- .../IndVarSimplify/eliminate-rem.ll | 6 +- .../Transforms/IndVarSimplify/indirectbr.ll | 2 +- .../test/Transforms/IndVarSimplify/iv-fold.ll | 8 +- .../test/Transforms/IndVarSimplify/iv-sext.ll | 14 +- .../Transforms/IndVarSimplify/iv-widen.ll | 2 +- .../test/Transforms/IndVarSimplify/iv-zext.ll | 6 +- .../Transforms/IndVarSimplify/lftr-promote.ll | 2 +- .../Transforms/IndVarSimplify/lftr-reuse.ll | 8 +- .../Transforms/IndVarSimplify/lftr-zext.ll | 2 +- .../IndVarSimplify/loop_evaluate7.ll | 2 +- .../IndVarSimplify/loop_evaluate8.ll | 2 +- .../IndVarSimplify/no-iv-rewrite.ll | 8 +- .../IndVarSimplify/overflowcheck.ll | 2 +- .../phi-uses-value-multiple-times.ll | 2 +- .../IndVarSimplify/polynomial-expand.ll | 2 +- .../test/Transforms/IndVarSimplify/pr18223.ll | 2 +- .../test/Transforms/IndVarSimplify/pr20680.ll | 12 +- .../test/Transforms/IndVarSimplify/pr22222.ll | 2 +- .../promote-iv-to-eliminate-casts.ll | 2 +- .../IndVarSimplify/sharpen-range.ll | 4 +- .../IndVarSimplify/single-element-range.ll | 2 +- .../Transforms/IndVarSimplify/sink-alloca.ll | 2 +- llvm/test/Transforms/IndVarSimplify/udiv.ll | 4 +- .../test/Transforms/IndVarSimplify/uglygep.ll | 4 +- .../IndVarSimplify/ult-sub-to-eq.ll | 4 +- .../IndVarSimplify/use-range-metadata.ll | 2 +- .../IndVarSimplify/variable-stride-ivs-0.ll | 6 +- .../Transforms/IndVarSimplify/verify-scev.ll | 4 +- .../IndVarSimplify/widen-loop-comp.ll | 18 +- .../Transforms/IndVarSimplify/widen-nsw.ll | 2 +- .../Inline/2006-07-12-InlinePruneCGUpdate.ll | 2 +- 
.../2009-01-08-NoInlineDynamicAlloca.ll | 8 +- .../Inline/2009-01-13-RecursiveInlineCrash.ll | 42 +- llvm/test/Transforms/Inline/align.ll | 22 +- llvm/test/Transforms/Inline/alloca-bonus.ll | 10 +- .../Transforms/Inline/alloca-dbgdeclare.ll | 2 +- .../Inline/alloca-merge-align-nodl.ll | 12 +- .../Transforms/Inline/alloca-merge-align.ll | 16 +- llvm/test/Transforms/Inline/basictest.ll | 2 +- .../test/Transforms/Inline/byval-tail-call.ll | 2 +- llvm/test/Transforms/Inline/byval.ll | 10 +- llvm/test/Transforms/Inline/byval_lifetime.ll | 2 +- llvm/test/Transforms/Inline/crash2.ll | 2 +- llvm/test/Transforms/Inline/devirtualize-3.ll | 14 +- llvm/test/Transforms/Inline/devirtualize.ll | 6 +- llvm/test/Transforms/Inline/ephemeral.ll | 2 +- .../Transforms/Inline/gvn-inline-iteration.ll | 2 +- .../Transforms/Inline/inline-byval-bonus.ll | 32 +- llvm/test/Transforms/Inline/inline-cold.ll | 120 +-- .../Inline/inline-fast-math-flags.ll | 6 +- llvm/test/Transforms/Inline/inline-fp.ll | 24 +- .../Transforms/Inline/inline-invoke-tail.ll | 2 +- llvm/test/Transforms/Inline/inline-optsize.ll | 10 +- .../Transforms/Inline/inline_constprop.ll | 4 +- .../Transforms/Inline/inline_dbg_declare.ll | 8 +- .../test/Transforms/Inline/inline_minisize.ll | 96 +-- .../Inline/invoke-combine-clauses.ll | 2 +- llvm/test/Transforms/Inline/noalias-cs.ll | 12 +- llvm/test/Transforms/Inline/noalias.ll | 16 +- llvm/test/Transforms/Inline/noalias2.ll | 20 +- .../Transforms/Inline/optimization-remarks.ll | 16 +- llvm/test/Transforms/Inline/ptr-diff.ll | 16 +- .../2003-07-21-ExternalConstant.ll | 6 +- .../2003-09-09-VolatileLoadElim.ll | 2 +- .../2004-01-13-InstCombineInvokePHI.ll | 2 +- .../InstCombine/2004-05-07-UnsizedCastLoad.ll | 2 +- .../InstCombine/2004-09-20-BadLoadCombine.ll | 2 +- .../InstCombine/2004-09-20-BadLoadCombine2.ll | 2 +- .../2005-06-16-SetCCOrSetCCMiscompile.ll | 2 +- .../InstCombine/2006-09-15-CastToBool.ll | 2 +- .../2006-12-08-Phi-ICmp-Op-Fold.ll | 6 +- .../InstCombine/2006-12-08-Select-ICmp.ll | 6 +- .../InstCombine/2006-12-15-Range-Test.ll | 2 +- .../InstCombine/2006-12-23-Select-Cmp-Cmp.ll | 4 +- .../InstCombine/2007-02-01-LoadSinkAlloca.ll | 4 +- .../InstCombine/2007-02-07-PointerCast.ll | 2 +- .../InstCombine/2007-03-25-BadShiftMask.ll | 2 +- .../InstCombine/2007-06-06-AshrSignBit.ll | 4 +- .../InstCombine/2007-09-10-AliasConstFold.ll | 2 +- .../InstCombine/2007-10-10-EliminateMemCpy.ll | 2 +- .../InstCombine/2007-10-31-RangeCrash.ll | 2 +- .../InstCombine/2007-10-31-StringCrash.ll | 2 +- .../2007-11-07-OpaqueAlignCrash.ll | 4 +- .../InstCombine/2007-12-28-IcmpSub2.ll | 16 +- .../InstCombine/2008-03-13-IntToPtr.ll | 2 +- .../2008-04-29-VolatileLoadDontMerge.ll | 4 +- .../2008-04-29-VolatileLoadMerge.ll | 4 +- .../InstCombine/2008-05-09-SinkOfInvoke.ll | 2 +- .../InstCombine/2008-05-17-InfLoop.ll | 4 +- .../InstCombine/2008-05-23-CompareFold.ll | 2 +- .../InstCombine/2008-06-19-UncondLoad.ll | 6 +- .../2008-07-08-VolatileLoadMerge.ll | 4 +- .../Transforms/InstCombine/2008-08-05-And.ll | 2 +- .../InstCombine/2009-01-08-AlignAlloca.ll | 2 +- ...2009-01-19-fmod-constant-float-specials.ll | 64 +- .../2009-01-19-fmod-constant-float.ll | 16 +- .../2009-02-20-InstCombine-SROA.ll | 92 +-- .../InstCombine/2009-02-21-LoadCST.ll | 2 +- .../2009-02-25-CrashZeroSizeArray.ll | 4 +- .../2009-03-18-vector-ashr-crash.ll | 4 +- .../InstCombine/2009-05-23-FCmpToICmp.ll | 2 +- .../InstCombine/2010-03-03-ExtElim.ll | 2 +- .../InstCombine/2011-05-02-VectorBoolean.ll | 4 +- .../InstCombine/2011-05-28-swapmulsub.ll | 14 +- 
.../InstCombine/2011-06-13-nsw-alloca.ll | 12 +- .../InstCombine/2011-10-07-AlignPromotion.ll | 2 +- .../2012-05-27-Negative-Shift-Crash.ll | 6 +- .../InstCombine/2012-05-28-select-hang.ll | 4 +- .../InstCombine/2012-06-06-LoadOfPHIs.ll | 50 +- .../InstCombine/2012-07-25-LoadPart.ll | 2 +- .../2012-10-25-vector-of-pointers.ll | 2 +- .../InstCombine/2012-12-14-simp-vgep.ll | 2 +- ...013-03-05-Combine-BitcastTy-Into-Alloca.ll | 6 +- .../Transforms/InstCombine/CPP_min_max.ll | 12 +- llvm/test/Transforms/InstCombine/add3.ll | 2 +- .../Transforms/InstCombine/addrspacecast.ll | 6 +- .../Transforms/InstCombine/alias-recursion.ll | 2 +- .../test/Transforms/InstCombine/align-addr.ll | 10 +- .../test/Transforms/InstCombine/align-attr.ll | 4 +- .../Transforms/InstCombine/align-external.ll | 2 +- .../Transforms/InstCombine/aligned-altivec.ll | 16 +- .../Transforms/InstCombine/aligned-qpx.ll | 20 +- llvm/test/Transforms/InstCombine/alloca.ll | 2 +- .../InstCombine/assume-loop-align.ll | 4 +- .../InstCombine/assume-redundant.ll | 6 +- llvm/test/Transforms/InstCombine/assume.ll | 16 +- llvm/test/Transforms/InstCombine/atomic.ll | 6 +- .../InstCombine/bitcast-alias-function.ll | 34 +- llvm/test/Transforms/InstCombine/bitcast.ll | 2 +- llvm/test/Transforms/InstCombine/bittest.ll | 2 +- llvm/test/Transforms/InstCombine/call2.ll | 4 +- llvm/test/Transforms/InstCombine/cast.ll | 72 +- llvm/test/Transforms/InstCombine/cast_ptr.ll | 6 +- .../constant-fold-address-space-pointer.ll | 34 +- llvm/test/Transforms/InstCombine/crash.ll | 20 +- llvm/test/Transforms/InstCombine/debuginfo.ll | 8 +- .../Transforms/InstCombine/descale-zero.ll | 6 +- .../Transforms/InstCombine/div-shift-crash.ll | 2 +- .../Transforms/InstCombine/err-rep-cold.ll | 6 +- .../Transforms/InstCombine/extractvalue.ll | 12 +- llvm/test/Transforms/InstCombine/fmul.ll | 2 +- .../InstCombine/fold-vector-zero.ll | 2 +- .../Transforms/InstCombine/fp-ret-bitcast.ll | 6 +- llvm/test/Transforms/InstCombine/fpextend.ll | 12 +- .../Transforms/InstCombine/gc.relocate.ll | 2 +- .../Transforms/InstCombine/gep-addrspace.ll | 2 +- llvm/test/Transforms/InstCombine/gep-sext.ll | 12 +- llvm/test/Transforms/InstCombine/gepphigep.ll | 10 +- .../Transforms/InstCombine/getelementptr.ll | 38 +- .../test/Transforms/InstCombine/icmp-range.ll | 12 +- llvm/test/Transforms/InstCombine/invariant.ll | 2 +- .../Transforms/InstCombine/known_align.ll | 10 +- llvm/test/Transforms/InstCombine/load-cmp.ll | 48 +- .../Transforms/InstCombine/load-select.ll | 2 +- llvm/test/Transforms/InstCombine/load.ll | 46 +- llvm/test/Transforms/InstCombine/load3.ll | 8 +- .../InstCombine/loadstore-alignment.ll | 16 +- .../InstCombine/loadstore-metadata.ll | 28 +- llvm/test/Transforms/InstCombine/lshr-phi.ll | 6 +- .../InstCombine/malloc-free-delete.ll | 2 +- .../Transforms/InstCombine/mem-gep-zidx.ll | 4 +- llvm/test/Transforms/InstCombine/memcmp-1.ll | 4 +- .../InstCombine/memcpy-from-global.ll | 8 +- .../test/Transforms/InstCombine/merge-icmp.ll | 4 +- llvm/test/Transforms/InstCombine/mul.ll | 2 +- .../multi-size-address-space-pointer.ll | 16 +- .../test/Transforms/InstCombine/no-negzero.ll | 6 +- .../InstCombine/obfuscated_splat.ll | 2 +- llvm/test/Transforms/InstCombine/objsize.ll | 8 +- .../Transforms/InstCombine/odr-linkage.ll | 8 +- llvm/test/Transforms/InstCombine/or.ll | 4 +- .../Transforms/InstCombine/phi-merge-gep.ll | 12 +- llvm/test/Transforms/InstCombine/phi.ll | 26 +- llvm/test/Transforms/InstCombine/pr12251.ll | 4 +- llvm/test/Transforms/InstCombine/pr2645-0.ll | 4 +- 
llvm/test/Transforms/InstCombine/pr2645-1.ll | 2 +- .../Transforms/InstCombine/select-cmp-br.ll | 24 +- .../InstCombine/select-load-call.ll | 2 +- llvm/test/Transforms/InstCombine/select.ll | 86 +- .../InstCombine/shufflemask-undef.ll | 2 +- .../InstCombine/signed-comparison.ll | 2 +- .../simplify-demanded-bits-pointer.ll | 2 +- .../InstCombine/simplify-libcalls.ll | 2 +- llvm/test/Transforms/InstCombine/sincospi.ll | 12 +- llvm/test/Transforms/InstCombine/sqrt.ll | 2 +- llvm/test/Transforms/InstCombine/store.ll | 10 +- llvm/test/Transforms/InstCombine/strcmp-1.ll | 4 +- llvm/test/Transforms/InstCombine/strncmp-1.ll | 8 +- .../InstCombine/struct-assign-tbaa.ll | 4 +- .../InstCombine/vec_demanded_elts.ll | 6 +- .../InstCombine/vec_extract_var_elt.ll | 2 +- .../Transforms/InstCombine/vec_shuffle.ll | 4 +- .../Transforms/InstCombine/volatile_store.ll | 2 +- .../Transforms/InstCombine/vsx-unaligned.ll | 12 +- .../Transforms/InstCombine/zext-or-icmp.ll | 8 +- llvm/test/Transforms/InstMerge/ld_hoist1.ll | 10 +- .../Transforms/InstMerge/ld_hoist_st_sink.ll | 36 +- .../InstMerge/st_sink_barrier_call.ll | 8 +- .../InstMerge/st_sink_bugfix_22613.ll | 20 +- .../InstMerge/st_sink_no_barrier_call.ll | 8 +- .../InstMerge/st_sink_no_barrier_load.ll | 12 +- .../InstMerge/st_sink_no_barrier_store.ll | 8 +- .../InstMerge/st_sink_two_stores.ll | 8 +- .../InstMerge/st_sink_with_barrier.ll | 12 +- .../Transforms/InstSimplify/call-callconv.ll | 2 +- llvm/test/Transforms/InstSimplify/compare.ll | 6 +- llvm/test/Transforms/InstSimplify/load.ll | 4 +- .../InstSimplify/vector_ptr_bitcast.ll | 4 +- .../2009-01-05-InternalizeAliases.ll | 2 +- .../JumpThreading/2010-08-26-and.ll | 2 +- .../JumpThreading/2011-04-14-InfLoop.ll | 2 +- llvm/test/Transforms/JumpThreading/crash.ll | 2 +- .../Transforms/JumpThreading/landing-pad.ll | 26 +- .../test/Transforms/JumpThreading/lvi-load.ll | 2 +- .../test/Transforms/JumpThreading/or-undef.ll | 8 +- llvm/test/Transforms/JumpThreading/phi-eq.ll | 28 +- llvm/test/Transforms/JumpThreading/select.ll | 4 +- .../Transforms/JumpThreading/thread-loads.ll | 10 +- .../LCSSA/2006-06-03-IncorrectIDFPhis.ll | 2 +- .../LCSSA/2006-07-09-NoDominator.ll | 2 +- .../Transforms/LCSSA/2007-07-12-LICM-2.ll | 2 +- .../Transforms/LCSSA/2007-07-12-LICM-3.ll | 2 +- llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll | 2 +- llvm/test/Transforms/LCSSA/unreachable-use.ll | 4 +- .../Transforms/LICM/2003-05-02-LoadHoist.ll | 4 +- .../2004-09-14-AliasAnalysisInvalidate.ll | 4 +- .../LICM/2007-05-22-VolatileSink.ll | 4 +- .../Transforms/LICM/2007-07-30-AliasSet.ll | 2 +- .../LICM/2008-07-22-LoadGlobalConstant.ll | 4 +- .../LICM/2009-12-10-LICM-Indbr-Crash.ll | 2 +- .../LICM/2011-04-06-HoistMissedASTUpdate.ll | 4 +- .../2011-04-06-PromoteResultOfPromotion.ll | 6 +- .../Transforms/LICM/2011-04-09-RAUW-AST.ll | 4 +- llvm/test/Transforms/LICM/PR21582.ll | 2 +- llvm/test/Transforms/LICM/atomics.ll | 14 +- llvm/test/Transforms/LICM/constexpr.ll | 6 +- llvm/test/Transforms/LICM/crash.ll | 6 +- .../Transforms/LICM/hoist-bitcast-load.ll | 48 +- llvm/test/Transforms/LICM/hoist-deref-load.ll | 32 +- .../Transforms/LICM/hoist-invariant-load.ll | 8 +- llvm/test/Transforms/LICM/hoisting.ll | 8 +- .../Transforms/LICM/lcssa-ssa-promoter.ll | 2 +- .../LICM/scalar-promote-memmodel.ll | 4 +- llvm/test/Transforms/LICM/scalar_promote.ll | 26 +- llvm/test/Transforms/LICM/sinking.ll | 20 +- llvm/test/Transforms/LICM/speculate.ll | 10 +- llvm/test/Transforms/LICM/volatile-alias.ll | 22 +- .../Transforms/LoadCombine/load-combine-aa.ll | 14 +- 
.../LoadCombine/load-combine-assume.ll | 12 +- .../Transforms/LoadCombine/load-combine.ll | 78 +- .../Transforms/LoopDeletion/2008-05-06-Phi.ll | 10 +- .../LoopIdiom/basic-address-space.ll | 4 +- llvm/test/Transforms/LoopIdiom/basic.ll | 10 +- .../Transforms/LoopIdiom/scev-invalidation.ll | 2 +- llvm/test/Transforms/LoopReroll/basic.ll | 60 +- .../test/Transforms/LoopReroll/nonconst_lb.ll | 30 +- llvm/test/Transforms/LoopReroll/reduction.ll | 28 +- .../test/Transforms/LoopRotate/PhiRename-1.ll | 28 +- llvm/test/Transforms/LoopRotate/alloca.ll | 2 +- llvm/test/Transforms/LoopRotate/dbgvalue.ll | 6 +- llvm/test/Transforms/LoopRotate/indirectbr.ll | 2 +- .../Transforms/LoopRotate/multiple-exits.ll | 2 +- .../Transforms/LoopRotate/nosimplifylatch.ll | 4 +- .../Transforms/LoopRotate/phi-duplicate.ll | 4 +- .../Transforms/LoopRotate/simplifylatch.ll | 8 +- .../LoopSimplify/2003-08-15-PreheadersFail.ll | 14 +- .../2003-12-10-ExitBlocksProblem.ll | 8 +- .../Transforms/LoopSimplify/ashr-crash.ll | 6 +- .../Transforms/LoopSimplify/merge-exits.ll | 6 +- .../LoopSimplify/phi-node-simplify.ll | 8 +- .../Transforms/LoopSimplify/preserve-scev.ll | 4 +- .../LoopStrengthReduce/2005-08-15-AddRecIV.ll | 2 +- .../2005-08-17-OutOfLoopVariant.ll | 2 +- .../2008-08-13-CmpStride.ll | 2 +- .../LoopStrengthReduce/2008-09-09-Overflow.ll | 4 +- ...9-01-13-nonconstant-stride-outside-loop.ll | 4 +- .../2009-04-28-no-reduce-mul.ll | 2 +- .../2011-07-19-CritEdgeBreakCrash.ll | 2 +- .../LoopStrengthReduce/2011-10-06-ReusePhi.ll | 4 +- .../2011-12-19-PostincQuadratic.ll | 2 +- .../2012-03-15-nopreheader.ll | 2 +- .../2012-03-26-constexpr.ll | 8 +- .../2012-07-13-ExpandUDiv.ll | 4 +- .../2012-07-18-LimitReassociate.ll | 10 +- .../2013-01-14-ReuseCast.ll | 2 +- .../LoopStrengthReduce/AArch64/lsr-memcpy.ll | 2 +- .../LoopStrengthReduce/AArch64/req-regs.ll | 2 +- .../ARM/2012-06-15-lsr-noaddrmode.ll | 10 +- .../LoopStrengthReduce/ARM/ivchain-ARM.ll | 42 +- .../X86/2011-12-04-loserreg.ll | 20 +- .../X86/2012-01-13-phielim.ll | 8 +- .../LoopStrengthReduce/X86/ivchain-X86.ll | 44 +- .../X86/ivchain-stress-X86.ll | 16 +- .../X86/no_superflous_induction_vars.ll | 2 +- .../LoopStrengthReduce/X86/pr17473.ll | 4 +- .../addrec-gep-address-space.ll | 4 +- .../LoopStrengthReduce/addrec-gep.ll | 4 +- .../LoopStrengthReduce/address-space-loop.ll | 4 +- .../LoopStrengthReduce/dont_reverse.ll | 2 +- .../LoopStrengthReduce/post-inc-icmpzero.ll | 10 +- .../Transforms/LoopStrengthReduce/pr12691.ll | 8 +- .../Transforms/LoopStrengthReduce/pr18165.ll | 10 +- .../Transforms/LoopStrengthReduce/pr2570.ll | 40 +- .../Transforms/LoopStrengthReduce/pr3086.ll | 4 +- .../Transforms/LoopStrengthReduce/pr3399.ll | 2 +- .../Transforms/LoopStrengthReduce/pr3571.ll | 2 +- .../share_code_in_preheader.ll | 4 +- .../uglygep-address-space.ll | 4 +- .../Transforms/LoopStrengthReduce/uglygep.ll | 4 +- .../LoopUnroll/2011-08-08-PhiUpdate.ll | 4 +- .../LoopUnroll/2011-08-09-IVSimplify.ll | 2 +- .../LoopUnroll/2011-10-01-NoopTrunc.ll | 2 +- .../LoopUnroll/PowerPC/a2-unrolling.ll | 2 +- .../test/Transforms/LoopUnroll/X86/partial.ll | 12 +- llvm/test/Transforms/LoopUnroll/ephemeral.ll | 2 +- .../LoopUnroll/full-unroll-heuristics.ll | 4 +- .../Transforms/LoopUnroll/runtime-loop.ll | 6 +- .../Transforms/LoopUnroll/runtime-loop1.ll | 2 +- .../Transforms/LoopUnroll/runtime-loop2.ll | 2 +- .../Transforms/LoopUnroll/runtime-loop3.ll | 2 +- llvm/test/Transforms/LoopUnroll/scevunroll.ll | 10 +- .../LoopUnroll/shifted-tripcount.ll | 4 +- .../LoopUnroll/unroll-pragmas-disabled.ll 
| 10 +- .../Transforms/LoopUnroll/unroll-pragmas.ll | 18 +- .../LoopUnswitch/2008-06-17-DomFrontier.ll | 2 +- .../LoopUnswitch/2010-11-18-LCSSA.ll | 2 +- .../LoopUnswitch/2011-09-26-EHCrash.ll | 2 +- .../LoopUnswitch/2011-11-18-SimpleSwitch.ll | 10 +- .../2011-11-18-TwoSwitches-Threshold.ll | 6 +- .../LoopUnswitch/2011-11-18-TwoSwitches.ll | 14 +- .../2012-04-30-LoopUnswitch-LPad-Crash.ll | 4 +- .../Transforms/LoopUnswitch/2012-05-20-Phi.ll | 4 +- .../test/Transforms/LoopUnswitch/basictest.ll | 8 +- .../LoopUnswitch/preserve-analyses.ll | 6 +- .../LoopVectorize/12-12-11-if-conv.ll | 2 +- .../LoopVectorize/AArch64/aarch64-unroll.ll | 4 +- .../AArch64/arbitrary-induction-step.ll | 36 +- .../LoopVectorize/AArch64/arm64-unroll.ll | 4 +- .../LoopVectorize/AArch64/gather-cost.ll | 14 +- .../LoopVectorize/AArch64/sdiv-pow2.ll | 4 +- .../LoopVectorize/ARM/arm-unroll.ll | 4 +- .../LoopVectorize/ARM/gather-cost.ll | 14 +- .../LoopVectorize/ARM/gcc-examples.ll | 6 +- .../LoopVectorize/ARM/mul-cast-vect.ll | 24 +- .../LoopVectorize/ARM/width-detect.ll | 4 +- .../LoopVectorize/PowerPC/small-loop-rdx.ll | 2 +- .../LoopVectorize/PowerPC/vsx-tsvc-s173.ll | 10 +- .../LoopVectorize/X86/already-vectorized.ll | 2 +- .../Transforms/LoopVectorize/X86/assume.ll | 8 +- .../test/Transforms/LoopVectorize/X86/avx1.ll | 4 +- .../X86/constant-vector-operand.ll | 2 +- .../LoopVectorize/X86/cost-model.ll | 6 +- .../X86/fp32_to_uint32-cost-model.ll | 2 +- .../X86/fp64_to_uint32-cost-model.ll | 4 +- .../X86/fp_to_sint8-cost-model.ll | 2 +- .../LoopVectorize/X86/gather-cost.ll | 14 +- .../LoopVectorize/X86/gcc-examples.ll | 6 +- .../illegal-parallel-loop-uniform-write.ll | 4 +- .../LoopVectorize/X86/masked_load_store.ll | 150 ++-- .../LoopVectorize/X86/metadata-enable.ll | 12 +- .../X86/min-trip-count-switch.ll | 2 +- .../Transforms/LoopVectorize/X86/no-vector.ll | 2 +- .../X86/parallel-loops-after-reg2mem.ll | 14 +- .../LoopVectorize/X86/parallel-loops.ll | 18 +- .../Transforms/LoopVectorize/X86/powof2div.ll | 4 +- .../LoopVectorize/X86/reduction-crash.ll | 4 +- .../LoopVectorize/X86/small-size.ll | 16 +- .../Transforms/LoopVectorize/X86/tripcount.ll | 2 +- .../X86/uint64_to_fp64-cost-model.ll | 2 +- .../Transforms/LoopVectorize/X86/unroll-pm.ll | 2 +- .../LoopVectorize/X86/unroll-small-loops.ll | 10 +- .../LoopVectorize/X86/unroll_selection.ll | 4 +- .../LoopVectorize/X86/vect.omp.force.ll | 4 +- .../X86/vect.omp.force.small-tc.ll | 8 +- .../X86/vector-scalar-select-cost.ll | 8 +- .../X86/vector_ptr_load_store.ll | 8 +- .../X86/vectorization-remarks-missed.ll | 4 +- .../X86/vectorization-remarks.ll | 4 +- llvm/test/Transforms/LoopVectorize/align.ll | 8 +- .../LoopVectorize/bzip_reverse_loops.ll | 4 +- llvm/test/Transforms/LoopVectorize/calloc.ll | 2 +- .../LoopVectorize/conditional-assignment.ll | 2 +- .../Transforms/LoopVectorize/control-flow.ll | 2 +- .../Transforms/LoopVectorize/cpp-new-array.ll | 6 +- .../Transforms/LoopVectorize/dbg.value.ll | 4 +- .../test/Transforms/LoopVectorize/debugloc.ll | 4 +- .../LoopVectorize/duplicated-metadata.ll | 2 +- .../test/Transforms/LoopVectorize/ee-crash.ll | 2 +- llvm/test/Transforms/LoopVectorize/exact.ll | 2 +- llvm/test/Transforms/LoopVectorize/flags.ll | 6 +- .../LoopVectorize/float-reduction.ll | 4 +- llvm/test/Transforms/LoopVectorize/funcall.ll | 2 +- .../Transforms/LoopVectorize/gcc-examples.ll | 96 +-- .../Transforms/LoopVectorize/global_alias.ll | 362 ++++----- .../Transforms/LoopVectorize/hoist-loads.ll | 8 +- .../Transforms/LoopVectorize/i8-induction.ll | 2 +- 
.../LoopVectorize/if-conversion-edgemasks.ll | 10 +- .../LoopVectorize/if-conversion-nest.ll | 4 +- .../LoopVectorize/if-conversion-reduction.ll | 2 +- .../Transforms/LoopVectorize/if-conversion.ll | 6 +- .../LoopVectorize/if-pred-stores.ll | 8 +- .../LoopVectorize/incorrect-dom-info.ll | 4 +- .../Transforms/LoopVectorize/increment.ll | 6 +- .../Transforms/LoopVectorize/induction.ll | 10 +- .../Transforms/LoopVectorize/intrinsic.ll | 122 +-- .../test/Transforms/LoopVectorize/lifetime.ll | 6 +- .../LoopVectorize/loop-vect-memdep.ll | 6 +- llvm/test/Transforms/LoopVectorize/memdep.ll | 20 +- .../LoopVectorize/metadata-unroll.ll | 2 +- .../test/Transforms/LoopVectorize/metadata.ll | 4 +- .../LoopVectorize/minmax_reduction.ll | 68 +- .../LoopVectorize/multi-use-reduction-bug.ll | 6 +- .../LoopVectorize/multiple-address-spaces.ll | 2 +- .../LoopVectorize/no_array_bounds.ll | 8 +- .../LoopVectorize/no_idiv_reduction.ll | 2 +- .../LoopVectorize/no_int_induction.ll | 4 +- .../LoopVectorize/no_outside_user.ll | 4 +- .../Transforms/LoopVectorize/no_switch.ll | 2 +- .../Transforms/LoopVectorize/non-const-n.ll | 4 +- llvm/test/Transforms/LoopVectorize/opt.ll | 2 +- .../Transforms/LoopVectorize/ptr_loops.ll | 6 +- .../Transforms/LoopVectorize/read-only.ll | 4 +- .../Transforms/LoopVectorize/reduction.ll | 46 +- .../LoopVectorize/reverse_induction.ll | 6 +- .../Transforms/LoopVectorize/reverse_iter.ll | 2 +- .../runtime-check-address-space.ll | 14 +- .../runtime-check-readonly-address-space.ll | 20 +- .../LoopVectorize/runtime-check-readonly.ll | 4 +- .../Transforms/LoopVectorize/runtime-check.ll | 6 +- .../Transforms/LoopVectorize/runtime-limit.ll | 30 +- llvm/test/Transforms/LoopVectorize/safegep.ll | 8 +- .../LoopVectorize/same-base-access.ll | 24 +- .../Transforms/LoopVectorize/scalar-select.ll | 4 +- .../LoopVectorize/scev-exitlim-crash.ll | 10 +- .../Transforms/LoopVectorize/simple-unroll.ll | 2 +- .../Transforms/LoopVectorize/small-loop.ll | 4 +- .../LoopVectorize/start-non-zero.ll | 2 +- .../LoopVectorize/store-shuffle-bug.ll | 6 +- .../Transforms/LoopVectorize/struct_access.ll | 4 +- .../Transforms/LoopVectorize/tbaa-nodep.ll | 14 +- .../Transforms/LoopVectorize/unroll_novec.ll | 12 +- .../Transforms/LoopVectorize/value-ptr-bug.ll | 2 +- .../LoopVectorize/vect.omp.persistence.ll | 4 +- .../Transforms/LoopVectorize/vect.stats.ll | 6 +- .../LoopVectorize/vectorize-once.ll | 4 +- .../LoopVectorize/version-mem-access.ll | 6 +- .../Transforms/LoopVectorize/write-only.ll | 2 +- .../Transforms/LowerAtomic/atomic-swap.ll | 2 +- llvm/test/Transforms/LowerBitSets/simple.ll | 4 +- .../Transforms/LowerExpectIntrinsic/basic.ll | 38 +- .../2014-06-10-SwitchContiguousOpt.ll | 2 +- .../2014-06-11-SwitchDefaultUnreachableOpt.ll | 4 +- .../Mem2Reg/2002-03-28-UninitializedVal.ll | 2 +- .../2003-04-24-MultipleIdenticalSuccessors.ll | 2 +- .../Mem2Reg/2003-06-26-IterativePromote.ll | 6 +- .../Mem2Reg/2003-10-05-DeadPHIInsertion.ll | 4 +- .../Mem2Reg/2005-06-30-ReadBeforeWrite.ll | 16 +- .../Transforms/Mem2Reg/2005-11-28-Crash.ll | 2 +- .../Mem2Reg/2007-08-27-VolatileLoadsStores.ll | 6 +- .../Transforms/Mem2Reg/ConvertDebugInfo.ll | 8 +- .../Transforms/Mem2Reg/ConvertDebugInfo2.ll | 8 +- .../Mem2Reg/PromoteMemToRegister.ll | 6 +- .../Transforms/Mem2Reg/UndefValuesMerge.ll | 2 +- llvm/test/Transforms/Mem2Reg/atomic.ll | 2 +- llvm/test/Transforms/Mem2Reg/crash.ll | 6 +- .../MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll | 2 +- .../2011-06-02-CallSlotOverwritten.ll | 6 +- llvm/test/Transforms/MemCpyOpt/atomic.ll | 2 +- 
.../Transforms/MemCpyOpt/loadstore-sret.ll | 2 +- llvm/test/Transforms/MemCpyOpt/memcpy.ll | 2 +- llvm/test/Transforms/MemCpyOpt/sret.ll | 4 +- .../MergeFunc/2011-02-08-RemoveEqual.ll | 68 +- .../Transforms/MergeFunc/address-spaces.ll | 6 +- llvm/test/Transforms/MergeFunc/crash.ll | 4 +- .../MergeFunc/inttoptr-address-space.ll | 4 +- llvm/test/Transforms/MergeFunc/inttoptr.ll | 4 +- .../MergeFunc/mergefunc-struct-return.ll | 4 +- llvm/test/Transforms/MergeFunc/ranges.ll | 24 +- llvm/test/Transforms/MergeFunc/vector.ll | 4 +- .../Transforms/MetaRenamer/metarenamer.ll | 18 +- llvm/test/Transforms/ObjCARC/allocas.ll | 30 +- llvm/test/Transforms/ObjCARC/basic.ll | 142 ++-- llvm/test/Transforms/ObjCARC/cfg-hazards.ll | 2 +- .../ObjCARC/contract-storestrong-ivar.ll | 4 +- .../ObjCARC/contract-storestrong.ll | 36 +- ...e-that-exception-unwind-path-is-visited.ll | 8 +- llvm/test/Transforms/ObjCARC/escape.ll | 8 +- llvm/test/Transforms/ObjCARC/gvn.ll | 10 +- llvm/test/Transforms/ObjCARC/intrinsic-use.ll | 16 +- .../move-and-form-retain-autorelease.ll | 68 +- .../ObjCARC/move-and-merge-autorelease.ll | 14 +- llvm/test/Transforms/ObjCARC/nested.ll | 166 ++-- llvm/test/Transforms/ObjCARC/provenance.ll | 18 +- .../ObjCARC/retain-block-side-effects.ll | 6 +- .../2010-03-22-empty-baseclass.ll | 34 +- llvm/test/Transforms/PhaseOrdering/PR6627.ll | 20 +- llvm/test/Transforms/PhaseOrdering/basic.ll | 8 +- llvm/test/Transforms/PhaseOrdering/gdce.ll | 16 +- .../Reassociate/2011-01-26-UseAfterFree.ll | 2 +- llvm/test/Transforms/Reassociate/basictest.ll | 24 +- llvm/test/Transforms/Reassociate/crash.ll | 6 +- .../Transforms/Reassociate/fast-basictest.ll | 18 +- llvm/test/Transforms/Reassociate/pr12245.ll | 20 +- llvm/test/Transforms/Reassociate/pr21205.ll | 2 +- .../RewriteStatepointsForGC/basics.ll | 8 +- .../SCCP/2003-06-24-OverdefinedPHIValue.ll | 2 +- .../SCCP/2006-10-23-IPSCCP-Crash.ll | 6 +- .../Transforms/SCCP/2006-12-04-PackedType.ll | 4 +- llvm/test/Transforms/SCCP/apint-array.ll | 4 +- llvm/test/Transforms/SCCP/apint-bigarray.ll | 4 +- llvm/test/Transforms/SCCP/apint-bigint2.ll | 4 +- llvm/test/Transforms/SCCP/apint-ipsccp3.ll | 4 +- llvm/test/Transforms/SCCP/apint-ipsccp4.ll | 4 +- llvm/test/Transforms/SCCP/apint-load.ll | 6 +- .../test/Transforms/SCCP/atomic-load-store.ll | 4 +- llvm/test/Transforms/SCCP/ipsccp-basic.ll | 6 +- llvm/test/Transforms/SCCP/loadtest.ll | 8 +- .../SLPVectorizer/AArch64/commute.ll | 12 +- .../SLPVectorizer/AArch64/load-store-q.ll | 8 +- .../SLPVectorizer/AArch64/sdiv-pow2.ll | 16 +- .../Transforms/SLPVectorizer/ARM/memory.ll | 4 +- .../Transforms/SLPVectorizer/R600/simplebb.ll | 36 +- .../Transforms/SLPVectorizer/X86/addsub.ll | 180 ++-- .../Transforms/SLPVectorizer/X86/align.ll | 18 +- .../test/Transforms/SLPVectorizer/X86/call.ll | 40 +- .../test/Transforms/SLPVectorizer/X86/cast.ll | 8 +- .../Transforms/SLPVectorizer/X86/cmp_sel.ll | 4 +- .../SLPVectorizer/X86/compare-reduce.ll | 4 +- .../SLPVectorizer/X86/consecutive-access.ll | 40 +- .../SLPVectorizer/X86/continue_vectorizing.ll | 12 +- .../SLPVectorizer/X86/crash_binaryop.ll | 2 +- .../SLPVectorizer/X86/crash_bullet.ll | 2 +- .../SLPVectorizer/X86/crash_cmpop.ll | 2 +- .../SLPVectorizer/X86/crash_dequeue.ll | 6 +- .../Transforms/SLPVectorizer/X86/crash_gep.ll | 2 +- .../SLPVectorizer/X86/crash_lencod.ll | 4 +- .../SLPVectorizer/X86/crash_mandeltext.ll | 4 +- .../X86/crash_netbsd_decompress.ll | 8 +- .../SLPVectorizer/X86/crash_vectorizeTree.ll | 4 +- .../SLPVectorizer/X86/cross_block_slp.ll | 8 +- 
llvm/test/Transforms/SLPVectorizer/X86/cse.ll | 42 +- .../Transforms/SLPVectorizer/X86/cycle_dup.ll | 10 +- .../SLPVectorizer/X86/debug_info.ll | 6 +- .../Transforms/SLPVectorizer/X86/diamond.ll | 24 +- .../SLPVectorizer/X86/external_user.ll | 8 +- .../Transforms/SLPVectorizer/X86/extract.ll | 6 +- .../SLPVectorizer/X86/extract_in_tree_user.ll | 18 +- .../test/Transforms/SLPVectorizer/X86/flag.ll | 8 +- llvm/test/Transforms/SLPVectorizer/X86/gep.ll | 8 +- .../Transforms/SLPVectorizer/X86/hoist.ll | 8 +- .../SLPVectorizer/X86/horizontal.ll | 100 +-- .../SLPVectorizer/X86/implicitfloat.ll | 8 +- .../SLPVectorizer/X86/in-tree-user.ll | 4 +- .../Transforms/SLPVectorizer/X86/intrinsic.ll | 136 ++-- .../SLPVectorizer/X86/long_chains.ll | 4 +- .../SLPVectorizer/X86/loopinvariant.ll | 16 +- .../Transforms/SLPVectorizer/X86/metadata.ll | 16 +- .../SLPVectorizer/X86/multi_block.ll | 4 +- .../SLPVectorizer/X86/multi_user.ll | 10 +- .../Transforms/SLPVectorizer/X86/odd_store.ll | 6 +- .../SLPVectorizer/X86/operandorder.ll | 96 +-- llvm/test/Transforms/SLPVectorizer/X86/opt.ll | 8 +- .../Transforms/SLPVectorizer/X86/ordering.ll | 2 +- llvm/test/Transforms/SLPVectorizer/X86/phi.ll | 40 +- .../test/Transforms/SLPVectorizer/X86/phi3.ll | 2 +- .../SLPVectorizer/X86/phi_overalignedtype.ll | 16 +- .../Transforms/SLPVectorizer/X86/powof2div.ll | 18 +- .../Transforms/SLPVectorizer/X86/pr16628.ll | 4 +- .../Transforms/SLPVectorizer/X86/pr16899.ll | 6 +- .../Transforms/SLPVectorizer/X86/pr19657.ll | 58 +- .../SLPVectorizer/X86/propagate_ir_flags.ll | 104 +-- .../Transforms/SLPVectorizer/X86/reduction.ll | 4 +- .../SLPVectorizer/X86/reduction2.ll | 4 +- .../Transforms/SLPVectorizer/X86/return.ll | 24 +- .../Transforms/SLPVectorizer/X86/rgb_phi.ll | 12 +- .../Transforms/SLPVectorizer/X86/saxpy.ll | 20 +- .../SLPVectorizer/X86/scheduling.ll | 16 +- .../SLPVectorizer/X86/simple-loop.ll | 16 +- .../Transforms/SLPVectorizer/X86/simplebb.ll | 32 +- .../Transforms/SLPVectorizer/X86/tiny-tree.ll | 24 +- .../SLPVectorizer/X86/unreachable.ll | 8 +- .../XCore/no-vector-registers.ll | 8 +- llvm/test/Transforms/SROA/address-spaces.ll | 6 +- llvm/test/Transforms/SROA/alignment.ll | 32 +- llvm/test/Transforms/SROA/basictest.ll | 180 ++-- llvm/test/Transforms/SROA/big-endian.ll | 10 +- llvm/test/Transforms/SROA/fca.ll | 6 +- llvm/test/Transforms/SROA/phi-and-select.ll | 66 +- .../SROA/slice-order-independence.ll | 8 +- llvm/test/Transforms/SROA/slice-width.ll | 14 +- .../test/Transforms/SROA/vector-conversion.ll | 6 +- .../SROA/vector-lifetime-intrinsic.ll | 2 +- llvm/test/Transforms/SROA/vector-promotion.ll | 90 +- .../Transforms/SROA/vectors-of-pointers.ll | 2 +- llvm/test/Transforms/SampleProfile/branch.ll | 2 +- llvm/test/Transforms/SampleProfile/calls.ll | 14 +- .../Transforms/SampleProfile/discriminator.ll | 10 +- .../Transforms/SampleProfile/propagate.ll | 58 +- .../ScalarRepl/2003-05-29-ArrayFail.ll | 2 +- .../2006-11-07-InvalidArrayPromote.ll | 2 +- .../ScalarRepl/2007-05-29-MemcpyPreserve.ll | 2 +- .../ScalarRepl/2007-11-03-bigendian_apint.ll | 8 +- .../ScalarRepl/2008-01-29-PromoteBug.ll | 2 +- .../2008-02-28-SubElementExtractCrash.ll | 2 +- .../ScalarRepl/2008-06-05-loadstore-agg.ll | 4 +- .../2008-08-22-out-of-range-array-promote.ll | 2 +- .../2009-02-02-ScalarPromoteOutOfRange.ll | 2 +- .../ScalarRepl/2009-02-05-LoadFCA.ll | 2 +- .../ScalarRepl/2009-12-11-NeonTypes.ll | 10 +- .../2011-06-08-VectorExtractValue.ll | 10 +- .../2011-06-17-VectorPartialMemset.ll | 2 +- .../2011-09-22-PHISpeculateInvoke.ll | 2 +- 
.../ScalarRepl/2011-11-11-EmptyStruct.ll | 2 +- .../Transforms/ScalarRepl/AggregatePromote.ll | 12 +- .../Transforms/ScalarRepl/DifferingTypes.ll | 2 +- .../Transforms/ScalarRepl/address-space.ll | 4 +- llvm/test/Transforms/ScalarRepl/arraytest.ll | 2 +- llvm/test/Transforms/ScalarRepl/badarray.ll | 4 +- llvm/test/Transforms/ScalarRepl/basictest.ll | 4 +- .../Transforms/ScalarRepl/bitfield-sroa.ll | 4 +- .../Transforms/ScalarRepl/copy-aggregate.ll | 12 +- llvm/test/Transforms/ScalarRepl/crash.ll | 16 +- .../ScalarRepl/debuginfo-preserved.ll | 14 +- .../Transforms/ScalarRepl/inline-vector.ll | 6 +- llvm/test/Transforms/ScalarRepl/lifetime.ll | 8 +- .../ScalarRepl/load-store-aggregate.ll | 6 +- .../memset-aggregate-byte-leader.ll | 2 +- .../Transforms/ScalarRepl/memset-aggregate.ll | 8 +- .../ScalarRepl/nonzero-first-index.ll | 8 +- .../Transforms/ScalarRepl/not-a-vector.ll | 2 +- llvm/test/Transforms/ScalarRepl/phi-cycle.ll | 2 +- llvm/test/Transforms/ScalarRepl/phi-select.ll | 16 +- .../Transforms/ScalarRepl/phinodepromote.ll | 6 +- .../Transforms/ScalarRepl/select_promote.ll | 6 +- llvm/test/Transforms/ScalarRepl/sroa-fca.ll | 4 +- llvm/test/Transforms/ScalarRepl/sroa_two.ll | 2 +- .../Transforms/ScalarRepl/union-fp-int.ll | 2 +- .../Transforms/ScalarRepl/union-packed.ll | 2 +- .../Transforms/ScalarRepl/union-pointer.ll | 12 +- .../Transforms/ScalarRepl/vector_memcpy.ll | 4 +- .../Transforms/ScalarRepl/vector_promote.ll | 34 +- .../vectors-with-mismatched-elements.ll | 4 +- llvm/test/Transforms/ScalarRepl/volatile.ll | 2 +- llvm/test/Transforms/Scalarizer/basic.ll | 82 +- llvm/test/Transforms/Scalarizer/dbginfo.ll | 20 +- .../Transforms/Scalarizer/no-data-layout.ll | 4 +- .../NVPTX/split-gep-and-gvn.ll | 32 +- .../SimplifyCFG/2005-06-16-PHICrash.ll | 4 +- .../2005-12-03-IncorrectPHIFold.ll | 36 +- .../SimplifyCFG/2006-08-03-Crash.ll | 28 +- .../SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll | 30 +- .../SimplifyCFG/2008-01-02-hoist-fp-add.ll | 2 +- .../2008-07-13-InfLoopMiscompile.ll | 2 +- .../SimplifyCFG/2008-09-08-MultiplePred.ll | 2 +- .../SimplifyCFG/2009-05-12-externweak.ll | 2 +- .../SimplifyCFG/AArch64/prefer-fma.ll | 20 +- .../Transforms/SimplifyCFG/EmptyBlockMerge.ll | 2 +- llvm/test/Transforms/SimplifyCFG/PR17073.ll | 6 +- .../Transforms/SimplifyCFG/SpeculativeExec.ll | 4 +- .../SimplifyCFG/X86/switch-covered-bug.ll | 2 +- .../SimplifyCFG/X86/switch-table-bug.ll | 2 +- .../SimplifyCFG/X86/switch_to_lookup_table.ll | 10 +- llvm/test/Transforms/SimplifyCFG/basictest.ll | 4 +- .../SimplifyCFG/branch-fold-threshold.ll | 2 +- .../SimplifyCFG/branch-phi-thread.ll | 2 +- llvm/test/Transforms/SimplifyCFG/dbginfo.ll | 2 +- .../SimplifyCFG/hoist-common-code.ll | 4 +- .../SimplifyCFG/hoist-with-range.ll | 6 +- .../test/Transforms/SimplifyCFG/indirectbr.ll | 6 +- .../SimplifyCFG/iterative-simplify.ll | 18 +- .../Transforms/SimplifyCFG/multiple-phis.ll | 2 +- .../no_speculative_loads_with_tsan.ll | 6 +- .../SimplifyCFG/phi-undef-loadstore.ll | 8 +- .../SimplifyCFG/preserve-branchweights.ll | 2 +- .../Transforms/SimplifyCFG/speculate-store.ll | 16 +- .../SimplifyCFG/speculate-with-offset.ll | 8 +- ...h-to-select-multiple-edge-per-block-phi.ll | 4 +- .../Transforms/SimplifyCFG/switch_create.ll | 2 +- .../SimplifyCFG/trapping-load-unreachable.ll | 2 +- .../SimplifyCFG/unreachable-blocks.ll | 2 +- llvm/test/Transforms/Sink/basic.ll | 12 +- .../StripSymbols/strip-dead-debug-info.ll | 2 +- .../post-order-traversal-bug.ll | 8 +- llvm/test/Transforms/TailCallElim/basic.ll | 2 +- 
 .../TailCallElim/dont_reorder_load.ll | 6 +-
 .../Transforms/TailCallElim/reorder_load.ll | 20 +-
 llvm/test/Verifier/2006-10-15-AddrLabel.ll | 2 +-
 llvm/test/Verifier/range-1.ll | 32 +-
 llvm/test/Verifier/range-2.ll | 10 +-
 llvm/test/tools/gold/slp-vectorize.ll | 8 +-
 llvm/test/tools/gold/vectorize.ll | 2 +-
 3931 files changed, 29317 insertions(+), 29293 deletions(-)
 create mode 100644 llvm/test/Assembler/invalid-load-mismatched-explicit-type.ll
 create mode 100644 llvm/test/Assembler/invalid-load-missing-explicit-type.ll

diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 4244679c8143..cad7c6d96d5a 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -5241,7 +5241,11 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
     Lex.Lex();
   }
 
-  if (ParseTypeAndValue(Val, Loc, PFS) ||
+  Type *Ty = nullptr;
+  LocTy ExplicitTypeLoc = Lex.getLoc();
+  if (ParseType(Ty) ||
+      ParseToken(lltok::comma, "expected comma after load's type") ||
+      ParseTypeAndValue(Val, Loc, PFS) ||
       ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
       ParseOptionalCommaAlign(Alignment, AteExtraComma))
     return true;
@@ -5254,6 +5258,10 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
   if (Ordering == Release || Ordering == AcquireRelease)
     return Error(Loc, "atomic load cannot use Release ordering");
 
+  if (Ty != cast<PointerType>(Val->getType())->getElementType())
+    return Error(ExplicitTypeLoc,
+                 "explicit pointee type doesn't match operand's pointee type");
+
   Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
   return AteExtraComma ? InstExtraComma : InstNormal;
 }
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 8f3e5ec92293..51be1b00f213 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -2898,10 +2898,14 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
     Out << ", ";
     TypePrinter.print(I.getType(), Out);
   } else if (Operand) { // Print the normal way.
-    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+    if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
       Out << ' ';
       TypePrinter.print(GEP->getSourceElementType(), Out);
       Out << ',';
+    } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
+      Out << ' ';
+      TypePrinter.print(LI->getType(), Out);
+      Out << ", ";
     }
 
     // PrintAllTypes - Instructions who have operands of all the same type
diff --git a/llvm/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll b/llvm/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
index b597ff89eb50..d712e33b8c0a 100644
--- a/llvm/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
+++ b/llvm/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
@@ -5,15 +5,15 @@
 ; RUN: opt < %s -basicaa -gvn -instcombine -S | FileCheck %s
 
 define i32 @test() {
-; CHECK: %Y.DONOTREMOVE = load i32* %A
+; CHECK: %Y.DONOTREMOVE = load i32, i32* %A
 ; CHECK: %Z = sub i32 0, %Y.DONOTREMOVE
   %A = alloca i32
   store i32 0, i32* %A
-  %X = load i32* %A
+  %X = load i32, i32* %A
   %B = bitcast i32* %A to i8*
   %C = getelementptr i8, i8* %B, i64 1
   store i8 1, i8* %C ; Aliases %A
-  %Y.DONOTREMOVE = load i32* %A
+  %Y.DONOTREMOVE = load i32, i32* %A
   %Z = sub i32 %X, %Y.DONOTREMOVE
   ret i32 %Z
 }
diff --git a/llvm/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll b/llvm/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
index c72ec817011f..96ca071587e6 100644
--- a/llvm/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
+++ b/llvm/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
@@ -6,9 +6,9 @@ define i32 @test(i32 *%Ptr, i64 %V) {
 ; CHECK: sub i32 %X, %Y
   %P2 = getelementptr i32, i32* %Ptr, i64 1
   %P1 = getelementptr i32, i32* %Ptr, i64 %V
-  %X = load i32* %P1
+  %X = load i32, i32* %P1
   store i32 5, i32* %P2
-  %Y = load i32* %P1
+  %Y = load i32, i32* %P1
   %Z = sub i32 %X, %Y
   ret i32 %Z
 }
diff --git a/llvm/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll b/llvm/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
index dbda9542a9af..fb5b3bb4618e 100644
--- a/llvm/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
+++ b/llvm/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
@@ -7,7 +7,7 @@
 define void @table_reindex(%struct..apr_table_t* %t.1) { ; No predecessors!
loopentry: ; preds = %0, %no_exit %tmp.101 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2 - %tmp.11 = load i32* %tmp.101 ; [#uses=0] + %tmp.11 = load i32, i32* %tmp.101 ; [#uses=0] br i1 false, label %no_exit, label %UnifiedExitNode no_exit: ; preds = %loopentry diff --git a/llvm/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll b/llvm/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll index 305546ba77f9..ace5982afaab 100644 --- a/llvm/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll +++ b/llvm/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll @@ -2,10 +2,10 @@ define i32 @MTConcat([3 x i32]* %a.1) { %tmp.961 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4 - %tmp.97 = load i32* %tmp.961 + %tmp.97 = load i32, i32* %tmp.961 %tmp.119 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 1, i64 0 - %tmp.120 = load i32* %tmp.119 + %tmp.120 = load i32, i32* %tmp.119 %tmp.1541 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4 - %tmp.155 = load i32* %tmp.1541 + %tmp.155 = load i32, i32* %tmp.1541 ret i32 0 } diff --git a/llvm/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll b/llvm/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll index fd4c239bbbee..1e75d6461d6f 100644 --- a/llvm/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll +++ b/llvm/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll @@ -7,9 +7,9 @@ define i32 @test(i32* %P) { %X = alloca i32 - %V1 = load i32* %P + %V1 = load i32, i32* %P store i32 0, i32* %X - %V2 = load i32* %P + %V2 = load i32, i32* %P %Diff = sub i32 %V1, %V2 ret i32 %Diff } diff --git a/llvm/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll b/llvm/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll index 104d2bf350cd..eb05e1edd75b 100644 --- a/llvm/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll +++ b/llvm/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll @@ -15,9 +15,9 @@ no_exit: ; preds = %no_exit, %entry %tmp.6 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; [#uses=1] store i32 1, i32* %tmp.6 %tmp.8 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; [#uses=1] - %tmp.9 = load i32* %tmp.8 ; [#uses=1] + %tmp.9 = load i32, i32* %tmp.8 ; [#uses=1] %tmp.11 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; [#uses=1] - %tmp.12 = load i32* %tmp.11 ; [#uses=1] + %tmp.12 = load i32, i32* %tmp.11 ; [#uses=1] %tmp.13 = add i32 %tmp.12, %tmp.9 ; [#uses=1] %inc = add i32 %i.0.0, 1 ; [#uses=2] %tmp.2 = icmp slt i32 %inc, %N ; [#uses=1] diff --git a/llvm/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll b/llvm/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll index 14d7f58d38a1..86bbd44c6448 100644 --- a/llvm/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll +++ b/llvm/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll @@ -23,12 +23,12 @@ target triple = "i686-apple-darwin8" define i32 @test(%struct.closure_type* %tmp18169) { %tmp18174 = getelementptr %struct.closure_type, %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; [#uses=2] %tmp18269 = bitcast i32* %tmp18174 to %struct.STYLE* ; <%struct.STYLE*> [#uses=1] - %A = load i32* %tmp18174 ; [#uses=1] + %A = load i32, i32* %tmp18174 ; [#uses=1] %tmp18272 = getelementptr %struct.STYLE, %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; [#uses=1] store i16 123, i16* %tmp18272 - %Q = load i32* %tmp18174 ; [#uses=1] + %Q = load i32, i32* %tmp18174 ; [#uses=1] %Z = sub i32 %A, %Q ; [#uses=1] ret i32 %Z } diff --git 
a/llvm/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll b/llvm/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll index ec0e2bd91413..5f0e1170f044 100644 --- a/llvm/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll +++ b/llvm/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll @@ -5,10 +5,10 @@ declare i16 @llvm.cttz.i16(i16, i1) define i32 @test(i32* %P, i16* %Q) { ; CHECK: ret i32 0 - %A = load i16* %Q ; [#uses=1] - %x = load i32* %P ; [#uses=1] + %A = load i16, i16* %Q ; [#uses=1] + %x = load i32, i32* %P ; [#uses=1] %B = call i16 @llvm.cttz.i16( i16 %A, i1 true ) ; [#uses=1] - %y = load i32* %P ; [#uses=1] + %y = load i32, i32* %P ; [#uses=1] store i16 %B, i16* %Q %z = sub i32 %x, %y ; [#uses=1] ret i32 %z diff --git a/llvm/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll b/llvm/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll index e0e64fb9f933..9e3745737f3e 100644 --- a/llvm/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll +++ b/llvm/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll @@ -11,6 +11,6 @@ entry: store i32 1, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 %tmp4 = getelementptr %struct.A, %struct.A* %b, i32 0, i32 0 ; [#uses=1] store i32 0, i32* %tmp4, align 4 - %tmp7 = load i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; [#uses=1] + %tmp7 = load i32, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; [#uses=1] ret i32 %tmp7 } diff --git a/llvm/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll b/llvm/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll index 8014a24ee259..069bd0bcfd8f 100644 --- a/llvm/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll +++ b/llvm/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll @@ -17,7 +17,7 @@ entry: %tmp17 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1 ; [#uses=1] %tmp1718 = bitcast i64* %tmp17 to i32* ; [#uses=1] - %tmp19 = load i32* %tmp1718, align 4 ; [#uses=0] + %tmp19 = load i32, i32* %tmp1718, align 4 ; [#uses=0] br i1 false, label %cond_true34, label %done_okay cond_true34: ; preds = %entry @@ -25,7 +25,7 @@ cond_true34: ; preds = %entry 2305843009213693950 ; [#uses=1] %tmp70 = bitcast i64* %tmp631 to %struct.device** - %tmp71 = load %struct.device** %tmp70, align 8 + %tmp71 = load %struct.device*, %struct.device** %tmp70, align 8 ret i32 undef diff --git a/llvm/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll b/llvm/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll index ceba1d24a7ef..20be13d153bb 100644 --- a/llvm/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll +++ b/llvm/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll @@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gnu" define i32 @ehci_pci_setup(%struct.usb_hcd* %hcd) { entry: %tmp14 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1] - %tmp15 = load %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0] + %tmp15 = load %struct.device*, %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0] br i1 false, label %bb25, label %return bb25: ; preds = %entry @@ -23,7 +23,7 @@ bb25: ; preds = %entry cond_true: ; preds = %bb25 %tmp601 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; [#uses=1] %tmp67 = bitcast i64* %tmp601 to %struct.device** ; <%struct.device**> [#uses=1] - %tmp68 = load %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0] + %tmp68 = load %struct.device*, %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0] 
ret i32 undef return: ; preds = %bb25, %entry diff --git a/llvm/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll b/llvm/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll index 170914447644..9b6dbeccd73e 100644 --- a/llvm/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll +++ b/llvm/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll @@ -10,6 +10,6 @@ target triple = "i686-pc-linux-gnu" define void @test291() nounwind { entry: store i32 1138410269, i32* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2, i32 1) - %tmp54 = load i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; [#uses=0] + %tmp54 = load i32, i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; [#uses=0] unreachable } diff --git a/llvm/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll b/llvm/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll index 3db9a3fbcdc9..49a742c31f58 100644 --- a/llvm/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll +++ b/llvm/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll @@ -9,6 +9,6 @@ define i32 @foo() { %B = call i32* @_Znwj(i32 4) store i32 1, i32* %A store i32 2, i32* %B - %C = load i32* %A + %C = load i32, i32* %A ret i32 %C } diff --git a/llvm/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll b/llvm/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll index 643d54dfaf33..65dcf5ceab36 100644 --- a/llvm/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll +++ b/llvm/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll @@ -3,12 +3,12 @@ declare noalias i32* @noalias() define i32 @test(i32 %x) { -; CHECK: load i32* %a +; CHECK: load i32, i32* %a %a = call i32* @noalias() store i32 1, i32* %a %b = getelementptr i32, i32* %a, i32 %x store i32 2, i32* %b - %c = load i32* %a + %c = load i32, i32* %a ret i32 %c } diff --git a/llvm/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll b/llvm/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll index 8704d19465ce..97a92517dca8 100644 --- a/llvm/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll +++ b/llvm/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll @@ -5,9 +5,9 @@ define i8 @foo(i8* %ptr) { %P = getelementptr i8, i8* %ptr, i32 0 %Q = getelementptr i8, i8* %ptr, i32 1 ; CHECK: getelementptr - %X = load i8* %P + %X = load i8, i8* %P %Y = atomicrmw add i8* %Q, i8 1 monotonic - %Z = load i8* %P + %Z = load i8, i8* %P ; CHECK-NOT: = load %A = sub i8 %X, %Z ret i8 %A diff --git a/llvm/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll b/llvm/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll index a2515a614617..43ee96ce9b3d 100644 --- a/llvm/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll +++ b/llvm/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll @@ -23,9 +23,9 @@ bb1: bb2: %P = phi i32* [ %b, %bb ], [ @Y, %bb1 ] - %tmp1 = load i32* @Z, align 4 + %tmp1 = load i32, i32* @Z, align 4 store i32 123, i32* %P, align 4 - %tmp2 = load i32* @Z, align 4 + %tmp2 = load i32, i32* @Z, align 4 br label %return return: diff --git a/llvm/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll b/llvm/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll index 40c65af107da..b2e7a60047bd 100644 --- a/llvm/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll +++ b/llvm/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll @@ -9,9 +9,9 @@ define i32 @test(i32* %tab, i32 %indvar) nounwind { %tmp31 = mul i32 %indvar, -2 %tmp32 = add i32 %tmp31, 30 %t.5 = getelementptr i32, i32* %tab, i32 %tmp32 - %loada = load i32* %tab + %loada = load i32, i32* %tab store i32 0, i32* %t.5 - %loadb = load i32* 
%tab + %loadb = load i32, i32* %tab %rval = add i32 %loada, %loadb ret i32 %rval } diff --git a/llvm/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll b/llvm/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll index 82e8044635ec..08db5ec4d1d9 100644 --- a/llvm/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll +++ b/llvm/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll @@ -30,7 +30,7 @@ define i32 @main() { store i8 0, i8* %10 %11 = getelementptr inbounds i8, i8* %10, i32 -1 store i8 0, i8* %11 - %12 = load i32* %1, align 4 + %12 = load i32, i32* %1, align 4 ret i32 %12 ; CHECK: ret i32 %12 } diff --git a/llvm/test/Analysis/BasicAA/aligned-overread.ll b/llvm/test/Analysis/BasicAA/aligned-overread.ll index b05f8eb69483..47588e71a71b 100644 --- a/llvm/test/Analysis/BasicAA/aligned-overread.ll +++ b/llvm/test/Analysis/BasicAA/aligned-overread.ll @@ -9,10 +9,10 @@ target triple = "x86_64-apple-macosx10.8.0" define i32 @main() nounwind uwtable ssp { entry: - %tmp = load i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4 + %tmp = load i8, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4 %tmp1 = or i8 %tmp, -128 store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4 - %tmp2 = load i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8 + %tmp2 = load i64, i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8 store i8 11, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4 %tmp3 = trunc i64 %tmp2 to i32 ret i32 %tmp3 diff --git a/llvm/test/Analysis/BasicAA/args-rets-allocas-loads.ll b/llvm/test/Analysis/BasicAA/args-rets-allocas-loads.ll index 066f46b16c01..05b56a07e44b 100644 --- a/llvm/test/Analysis/BasicAA/args-rets-allocas-loads.ll +++ b/llvm/test/Analysis/BasicAA/args-rets-allocas-loads.ll @@ -22,8 +22,8 @@ define void @caller_a(double* %arg_a0, %noalias_ret_a0 = call double* @noalias_returner() %noalias_ret_a1 = call double* @noalias_returner() - %loaded_a0 = load double** %indirect_a0 - %loaded_a1 = load double** %indirect_a1 + %loaded_a0 = load double*, double** %indirect_a0 + %loaded_a1 = load double*, double** %indirect_a1 call void @callee(double* %escape_alloca_a0) call void @callee(double* %escape_alloca_a1) diff --git a/llvm/test/Analysis/BasicAA/byval.ll b/llvm/test/Analysis/BasicAA/byval.ll index 260aebe2985e..edbe7b33de17 100644 --- a/llvm/test/Analysis/BasicAA/byval.ll +++ b/llvm/test/Analysis/BasicAA/byval.ll @@ -10,7 +10,7 @@ define i32 @foo(%struct.x* byval %a) nounwind { %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; [#uses=2] store i32 1, i32* %tmp2, align 4 store i32 2, i32* @g, align 4 - %tmp4 = load i32* %tmp2, align 4 ; [#uses=1] + %tmp4 = load i32, i32* %tmp2, align 4 ; [#uses=1] ret i32 %tmp4 } diff --git a/llvm/test/Analysis/BasicAA/cas.ll b/llvm/test/Analysis/BasicAA/cas.ll index d0cd9f40ccc2..b770cb73381a 100644 --- a/llvm/test/Analysis/BasicAA/cas.ll +++ b/llvm/test/Analysis/BasicAA/cas.ll @@ -6,9 +6,9 @@ ; CHECK: ret i32 0 define i32 @main() { - %a = load i32* @flag0 + %a = load i32, i32* @flag0 %b = atomicrmw xchg i32* @turn, i32 1 monotonic - %c = load i32* @flag0 + %c = load i32, i32* @flag0 %d = sub i32 %a, %c ret i32 %d } diff --git a/llvm/test/Analysis/BasicAA/dag.ll b/llvm/test/Analysis/BasicAA/dag.ll index 1d2f6f1a76dd..63e2c1a8c09f 100644 --- a/llvm/test/Analysis/BasicAA/dag.ll +++ b/llvm/test/Analysis/BasicAA/dag.ll @@ -36,6 +36,6 @@ xc: %bigbase = bitcast i8* %base to i16* store i16 -1, i16* 
%bigbase - %loaded = load i8* %phi + %loaded = load i8, i8* %phi ret i8 %loaded } diff --git a/llvm/test/Analysis/BasicAA/featuretest.ll b/llvm/test/Analysis/BasicAA/featuretest.ll index 19e9b16b0e29..97e97f562cde 100644 --- a/llvm/test/Analysis/BasicAA/featuretest.ll +++ b/llvm/test/Analysis/BasicAA/featuretest.ll @@ -19,12 +19,12 @@ define i32 @different_array_test(i64 %A, i64 %B) { call void @external(i32* %Array2) %pointer = getelementptr i32, i32* %Array1, i64 %A - %val = load i32* %pointer + %val = load i32, i32* %pointer %pointer2 = getelementptr i32, i32* %Array2, i64 %B store i32 7, i32* %pointer2 - %REMOVE = load i32* %pointer ; redundant with above load + %REMOVE = load i32, i32* %pointer ; redundant with above load %retval = sub i32 %REMOVE, %val ret i32 %retval ; CHECK: @different_array_test @@ -41,9 +41,9 @@ define i32 @constant_array_index_test() { %P1 = getelementptr i32, i32* %Array, i64 7 %P2 = getelementptr i32, i32* %Array, i64 6 - %A = load i32* %P1 + %A = load i32, i32* %P1 store i32 1, i32* %P2 ; Should not invalidate load - %BREMOVE = load i32* %P1 + %BREMOVE = load i32, i32* %P1 %Val = sub i32 %A, %BREMOVE ret i32 %Val ; CHECK: @constant_array_index_test @@ -53,10 +53,10 @@ define i32 @constant_array_index_test() { ; Test that if two pointers are spaced out by a constant getelementptr, that ; they cannot alias. define i32 @gep_distance_test(i32* %A) { - %REMOVEu = load i32* %A + %REMOVEu = load i32, i32* %A %B = getelementptr i32, i32* %A, i64 2 ; Cannot alias A store i32 7, i32* %B - %REMOVEv = load i32* %A + %REMOVEv = load i32, i32* %A %r = sub i32 %REMOVEu, %REMOVEv ret i32 %r ; CHECK: @gep_distance_test @@ -67,10 +67,10 @@ define i32 @gep_distance_test(i32* %A) { ; cannot alias, even if there is a variable offset between them... define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) { %A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0 - %REMOVEu = load i32* %A1 + %REMOVEu = load i32, i32* %A1 %B = getelementptr {i32,i32}, {i32,i32}* %A, i64 %distance, i32 1 store i32 7, i32* %B ; B cannot alias A, it's at least 4 bytes away - %REMOVEv = load i32* %A1 + %REMOVEv = load i32, i32* %A1 %r = sub i32 %REMOVEu, %REMOVEv ret i32 %r ; CHECK: @gep_distance_test2 @@ -80,11 +80,11 @@ define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) { ; Test that we can do funny pointer things and that distance calc will still ; work. 
define i32 @gep_distance_test3(i32 * %A) { - %X = load i32* %A + %X = load i32, i32* %A %B = bitcast i32* %A to i8* %C = getelementptr i8, i8* %B, i64 4 store i8 42, i8* %C - %Y = load i32* %A + %Y = load i32, i32* %A %R = sub i32 %X, %Y ret i32 %R ; CHECK: @gep_distance_test3 @@ -96,9 +96,9 @@ define i32 @constexpr_test() { %X = alloca i32 call void @external(i32* %X) - %Y = load i32* %X + %Y = load i32, i32* %X store i32 5, i32* getelementptr ({ i32 }* @Global, i64 0, i32 0) - %REMOVE = load i32* %X + %REMOVE = load i32, i32* %X %retval = sub i32 %Y, %REMOVE ret i32 %retval ; CHECK: @constexpr_test @@ -113,12 +113,12 @@ define i16 @zext_sext_confusion(i16* %row2col, i5 %j) nounwind{ entry: %sum5.cast = zext i5 %j to i64 ; [#uses=1] %P1 = getelementptr i16, i16* %row2col, i64 %sum5.cast - %row2col.load.1.2 = load i16* %P1, align 1 ; [#uses=1] + %row2col.load.1.2 = load i16, i16* %P1, align 1 ; [#uses=1] %sum13.cast31 = sext i5 %j to i6 ; [#uses=1] %sum13.cast = zext i6 %sum13.cast31 to i64 ; [#uses=1] %P2 = getelementptr i16, i16* %row2col, i64 %sum13.cast - %row2col.load.1.6 = load i16* %P2, align 1 ; [#uses=1] + %row2col.load.1.6 = load i16, i16* %P2, align 1 ; [#uses=1] %.ret = sub i16 %row2col.load.1.6, %row2col.load.1.2 ; [#uses=1] ret i16 %.ret diff --git a/llvm/test/Analysis/BasicAA/full-store-partial-alias.ll b/llvm/test/Analysis/BasicAA/full-store-partial-alias.ll index e046e13ad9f0..341f6ba23b3a 100644 --- a/llvm/test/Analysis/BasicAA/full-store-partial-alias.ll +++ b/llvm/test/Analysis/BasicAA/full-store-partial-alias.ll @@ -20,11 +20,11 @@ entry: %u = alloca %union.anon, align 8 %tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0 store double %x, double* %tmp9, align 8, !tbaa !0 - %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3 + %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3 %idxprom = sext i32 %tmp2 to i64 %tmp4 = bitcast %union.anon* %u to [2 x i32]* %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom - %tmp5 = load i32* %arrayidx, align 4, !tbaa !3 + %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3 %tmp5.lobit = lshr i32 %tmp5, 31 ret i32 %tmp5.lobit } diff --git a/llvm/test/Analysis/BasicAA/gcsetest.ll b/llvm/test/Analysis/BasicAA/gcsetest.ll index 64792eb00a2f..cf6ab710bf2f 100644 --- a/llvm/test/Analysis/BasicAA/gcsetest.ll +++ b/llvm/test/Analysis/BasicAA/gcsetest.ll @@ -12,11 +12,11 @@ ; CHECK-NEXT: ret i32 0 define i32 @test() { - %A1 = load i32* @A + %A1 = load i32, i32* @A store i32 123, i32* @B ; Store cannot alias @A - %A2 = load i32* @A + %A2 = load i32, i32* @A %X = sub i32 %A1, %A2 ret i32 %X } @@ -30,13 +30,13 @@ define i32 @test() { ; CHECK-NEXT: ret i32 0 define i32 @test2() { - %A1 = load i32* @A + %A1 = load i32, i32* @A br label %Loop Loop: %AP = phi i32 [0, %0], [%X, %Loop] store i32 %AP, i32* @B ; Store cannot alias @A - %A2 = load i32* @A + %A2 = load i32, i32* @A %X = sub i32 %A1, %A2 %c = icmp eq i32 %X, 0 br i1 %c, label %out, label %Loop @@ -55,7 +55,7 @@ define i32 @test3() { %X = alloca i32 store i32 7, i32* %X call void @external() - %V = load i32* %X + %V = load i32, i32* %X ret i32 %V } diff --git a/llvm/test/Analysis/BasicAA/gep-alias.ll b/llvm/test/Analysis/BasicAA/gep-alias.ll index 3f2e88a94593..f686010f9ead 100644 --- a/llvm/test/Analysis/BasicAA/gep-alias.ll +++ b/llvm/test/Analysis/BasicAA/gep-alias.ll @@ -7,11 +7,11 @@ define i32 @test1(i8 * %P) { entry: %Q = bitcast i8* %P to {i32, i32}* %R = getelementptr {i32, 
i32}, {i32, i32}* %Q, i32 0, i32 1 - %S = load i32* %R + %S = load i32, i32* %R %q = bitcast i8* %P to {i32, i32}* %r = getelementptr {i32, i32}, {i32, i32}* %q, i32 0, i32 1 - %s = load i32* %r + %s = load i32, i32* %r %t = sub i32 %S, %s ret i32 %t @@ -23,12 +23,12 @@ define i32 @test2(i8 * %P) { entry: %Q = bitcast i8* %P to {i32, i32, i32}* %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 1 - %S = load i32* %R + %S = load i32, i32* %R %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 2 store i32 42, i32* %r - %s = load i32* %R + %s = load i32, i32* %R %t = sub i32 %S, %s ret i32 %t @@ -42,12 +42,12 @@ define i32 @test3({float, {i32, i32, i32}}* %P) { entry: %P2 = getelementptr {float, {i32, i32, i32}}, {float, {i32, i32, i32}}* %P, i32 0, i32 1 %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 1 - %S = load i32* %R + %S = load i32, i32* %R %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 2 store i32 42, i32* %r - %s = load i32* %R + %s = load i32, i32* %R %t = sub i32 %S, %s ret i32 %t @@ -66,7 +66,7 @@ entry: store i32 64, i32* %tmp2, align 8 %tmp3 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64 store i8* null, i8** %tmp3, align 8 - %tmp4 = load i32* %tmp2, align 8 + %tmp4 = load i32, i32* %tmp2, align 8 ret i32 %tmp4 ; CHECK-LABEL: @test4( ; CHECK: ret i32 64 @@ -77,9 +77,9 @@ define i32 @test5(i32* %p, i64 %i) { %pi = getelementptr i32, i32* %p, i64 %i %i.next = add i64 %i, 1 %pi.next = getelementptr i32, i32* %p, i64 %i.next - %x = load i32* %pi + %x = load i32, i32* %pi store i32 42, i32* %pi.next - %y = load i32* %pi + %y = load i32, i32* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test5( @@ -90,9 +90,9 @@ define i32 @test5_as1_smaller_size(i32 addrspace(1)* %p, i8 %i) { %pi = getelementptr i32, i32 addrspace(1)* %p, i8 %i %i.next = add i8 %i, 1 %pi.next = getelementptr i32, i32 addrspace(1)* %p, i8 %i.next - %x = load i32 addrspace(1)* %pi + %x = load i32, i32 addrspace(1)* %pi store i32 42, i32 addrspace(1)* %pi.next - %y = load i32 addrspace(1)* %pi + %y = load i32, i32 addrspace(1)* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test5_as1_smaller_size( @@ -104,9 +104,9 @@ define i32 @test5_as1_same_size(i32 addrspace(1)* %p, i16 %i) { %pi = getelementptr i32, i32 addrspace(1)* %p, i16 %i %i.next = add i16 %i, 1 %pi.next = getelementptr i32, i32 addrspace(1)* %p, i16 %i.next - %x = load i32 addrspace(1)* %pi + %x = load i32, i32 addrspace(1)* %pi store i32 42, i32 addrspace(1)* %pi.next - %y = load i32 addrspace(1)* %pi + %y = load i32, i32 addrspace(1)* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test5_as1_same_size( @@ -119,9 +119,9 @@ define i32 @test6(i32* %p, i64 %i1) { %pi = getelementptr i32, i32* %p, i64 %i %i.next = or i64 %i, 1 %pi.next = getelementptr i32, i32* %p, i64 %i.next - %x = load i32* %pi + %x = load i32, i32* %pi store i32 42, i32* %pi.next - %y = load i32* %pi + %y = load i32, i32* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test6( @@ -133,9 +133,9 @@ define i32 @test7(i32* %p, i64 %i) { %pi = getelementptr i32, i32* %p, i64 1 %i.next = shl i64 %i, 2 %pi.next = getelementptr i32, i32* %p, i64 %i.next - %x = load i32* %pi + %x = load i32, i32* %pi store i32 42, i32* %pi.next - %y = load i32* %pi + %y = load i32, i32* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test7( @@ -150,9 +150,9 @@ define i32 @test8(i32* %p, i16 %i) { %i.next = add i16 %i, 1 %i.next2 = zext i16 %i.next to i32 %pi.next = getelementptr i32, 
i32* %p, i32 %i.next2 - %x = load i32* %pi + %x = load i32, i32* %pi store i32 42, i32* %pi.next - %y = load i32* %pi + %y = load i32, i32* %pi %z = sub i32 %x, %y ret i32 %z ; CHECK-LABEL: @test8( @@ -170,9 +170,9 @@ define i8 @test9([4 x i8] *%P, i32 %i, i32 %j) { ; P4 = P + 4*j %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %j2 - %x = load i8* %P2 + %x = load i8, i8* %P2 store i8 42, i8* %P4 - %y = load i8* %P2 + %y = load i8, i8* %P2 %z = sub i8 %x, %y ret i8 %z ; CHECK-LABEL: @test9( @@ -188,9 +188,9 @@ define i8 @test10([4 x i8] *%P, i32 %i) { ; P4 = P + 4*i %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %i2 - %x = load i8* %P2 + %x = load i8, i8* %P2 store i8 42, i8* %P4 - %y = load i8* %P2 + %y = load i8, i8* %P2 %z = sub i8 %x, %y ret i8 %z ; CHECK-LABEL: @test10( @@ -207,7 +207,7 @@ define float @test11(i32 %indvar, [4 x [2 x float]]* %q) nounwind ssp { %y29 = getelementptr inbounds [2 x float], [2 x float]* %arrayidx28, i32 0, i32 1 store float 1.0, float* %y29, align 4 store i64 0, i64* %scevgep35, align 4 - %tmp30 = load float* %y29, align 4 + %tmp30 = load float, float* %y29, align 4 ret float %tmp30 ; CHECK-LABEL: @test11( ; CHECK: ret float %tmp30 @@ -223,7 +223,7 @@ define i32 @test12(i32 %x, i32 %y, i8* %p) nounwind { %castp = bitcast i8* %p to i32* store i32 1, i32* %castp store i32 0, i32* %castd - %r = load i32* %castp + %r = load i32, i32* %castp ret i32 %r ; CHECK-LABEL: @test12( ; CHECK: ret i32 %r diff --git a/llvm/test/Analysis/BasicAA/global-size.ll b/llvm/test/Analysis/BasicAA/global-size.ll index 6d06698d8ae8..bacf3bc9c1b9 100644 --- a/llvm/test/Analysis/BasicAA/global-size.ll +++ b/llvm/test/Analysis/BasicAA/global-size.ll @@ -8,9 +8,9 @@ target datalayout = "E-p:64:64:64-p1:16:16:16-a0:0:8-f32:32:32-f64:64:64-i1:8:8- ; CHECK-LABEL: @test1( define i16 @test1(i32* %P) { - %X = load i16* @B + %X = load i16, i16* @B store i32 7, i32* %P - %Y = load i16* @B + %Y = load i16, i16* @B %Z = sub i16 %Y, %X ret i16 %Z ; CHECK: ret i16 0 @@ -21,9 +21,9 @@ define i16 @test1(i32* %P) { define i16 @test1_as1(i32 addrspace(1)* %P) { ; CHECK-LABEL: @test1_as1( ; CHECK: ret i16 0 - %X = load i16 addrspace(1)* @B_as1 + %X = load i16, i16 addrspace(1)* @B_as1 store i32 7, i32 addrspace(1)* %P - %Y = load i16 addrspace(1)* @B_as1 + %Y = load i16, i16 addrspace(1)* @B_as1 %Z = sub i16 %Y, %X ret i16 %Z } @@ -39,10 +39,10 @@ define i8 @test2(i32 %tmp79, i32 %w.2, i32 %indvar89) nounwind { %tmp93 = add i32 %w.2, %indvar89 %arrayidx416 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp93 - %A = load i8* %arrayidx412, align 1 + %A = load i8, i8* %arrayidx412, align 1 store i8 4, i8* %arrayidx416, align 1 - %B = load i8* %arrayidx412, align 1 + %B = load i8, i8* %arrayidx412, align 1 %C = sub i8 %A, %B ret i8 %C diff --git a/llvm/test/Analysis/BasicAA/invariant_load.ll b/llvm/test/Analysis/BasicAA/invariant_load.ll index bc629cdfe9ea..722fb5b47650 100644 --- a/llvm/test/Analysis/BasicAA/invariant_load.ll +++ b/llvm/test/Analysis/BasicAA/invariant_load.ll @@ -10,15 +10,15 @@ define i32 @foo(i32* nocapture %p, i8* nocapture %q) { entry: - %0 = load i32* %p, align 4, !invariant.load !3 + %0 = load i32, i32* %p, align 4, !invariant.load !3 %conv = trunc i32 %0 to i8 store i8 %conv, i8* %q, align 1 - %1 = load i32* %p, align 4, !invariant.load !3 + %1 = load i32, i32* %p, align 4, !invariant.load !3 %add = add nsw i32 %1, 1 ret i32 %add ; CHECK: foo -; CHECK: %0 = load i32* %p +; CHECK: %0 = load i32, i32* %p ; CHECK: store i8 %conv, i8* %q, ; CHECK: %add = add nsw 
i32 %0, 1 } diff --git a/llvm/test/Analysis/BasicAA/memset_pattern.ll b/llvm/test/Analysis/BasicAA/memset_pattern.ll index 590664c5084e..25bdb2e202fb 100644 --- a/llvm/test/Analysis/BasicAA/memset_pattern.ll +++ b/llvm/test/Analysis/BasicAA/memset_pattern.ll @@ -13,7 +13,7 @@ entry: store i32 1, i32* @z tail call void @memset_pattern16(i8* bitcast (i32* @y to i8*), i8* bitcast (i32* @x to i8*), i64 4) nounwind ; CHECK-NOT: load - %l = load i32* @z + %l = load i32, i32* @z ; CHECK: ret i32 1 ret i32 %l } diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll index 39747f933cdf..e124d6cbe20f 100644 --- a/llvm/test/Analysis/BasicAA/modref.ll +++ b/llvm/test/Analysis/BasicAA/modref.ll @@ -13,7 +13,7 @@ define i32 @test0(i8* %P) { call void @llvm.memset.p0i8.i32(i8* %P, i8 0, i32 42, i32 1, i1 false) - %B = load i32* %A + %B = load i32, i32* %A ret i32 %B ; CHECK-LABEL: @test0 @@ -29,7 +29,7 @@ define i8 @test1() { call void @llvm.memcpy.p0i8.p0i8.i8(i8* %A, i8* %B, i8 -1, i32 0, i1 false) - %C = load i8* %B + %C = load i8, i8* %B ret i8 %C ; CHECK: ret i8 2 } @@ -39,7 +39,7 @@ define i8 @test2(i8* %P) { %P2 = getelementptr i8, i8* %P, i32 127 store i8 1, i8* %P2 ;; Not dead across memset call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false) - %A = load i8* %P2 + %A = load i8, i8* %P2 ret i8 %A ; CHECK: ret i8 1 } @@ -52,7 +52,7 @@ define i8 @test2a(i8* %P) { store i8 1, i8* %P2 ;; Dead, clobbered by memset. call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false) - %A = load i8* %P2 + %A = load i8, i8* %P2 ret i8 %A ; CHECK-NOT: load ; CHECK: ret i8 2 @@ -90,9 +90,9 @@ define void @test3a(i8* %P, i8 %X) { @G2 = external global [4000 x i32] define i32 @test4(i8* %P) { - %tmp = load i32* @G1 + %tmp = load i32, i32* @G1 call void @llvm.memset.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8 0, i32 4000, i32 1, i1 false) - %tmp2 = load i32* @G1 + %tmp2 = load i32, i32* @G1 %sub = sub i32 %tmp2, %tmp ret i32 %sub ; CHECK-LABEL: @test4 @@ -105,9 +105,9 @@ define i32 @test4(i8* %P) { ; Verify that basicaa is handling variable length memcpy, knowing it doesn't ; write to G1. 
define i32 @test5(i8* %P, i32 %Len) { - %tmp = load i32* @G1 + %tmp = load i32, i32* @G1 call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8* bitcast (i32* @G1 to i8*), i32 %Len, i32 1, i1 false) - %tmp2 = load i32* @G1 + %tmp2 = load i32, i32* @G1 %sub = sub i32 %tmp2, %tmp ret i32 %sub ; CHECK: @test5 @@ -118,13 +118,13 @@ define i32 @test5(i8* %P, i32 %Len) { } define i8 @test6(i8* %p, i8* noalias %a) { - %x = load i8* %a + %x = load i8, i8* %a %t = va_arg i8* %p, float - %y = load i8* %a + %y = load i8, i8* %a %z = add i8 %x, %y ret i8 %z ; CHECK-LABEL: @test6 -; CHECK: load i8* %a +; CHECK: load i8, i8* %a ; CHECK-NOT: load ; CHECK: ret } @@ -137,12 +137,12 @@ entry: store i32 0, i32* %x, align 4 %add.ptr = getelementptr inbounds i32, i32* %x, i64 1 call void @test7decl(i32* %add.ptr) - %tmp = load i32* %x, align 4 + %tmp = load i32, i32* %x, align 4 ret i32 %tmp ; CHECK-LABEL: @test7( ; CHECK: store i32 0 ; CHECK: call void @test7decl -; CHECK: load i32* +; CHECK: load i32, i32* } declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind diff --git a/llvm/test/Analysis/BasicAA/must-and-partial.ll b/llvm/test/Analysis/BasicAA/must-and-partial.ll index e8dc1debb78b..3b4c84a2f81b 100644 --- a/llvm/test/Analysis/BasicAA/must-and-partial.ll +++ b/llvm/test/Analysis/BasicAA/must-and-partial.ll @@ -20,7 +20,7 @@ green: %bigbase0 = bitcast i8* %base to i16* store i16 -1, i16* %bigbase0 - %loaded = load i8* %phi + %loaded = load i8, i8* %phi ret i8 %loaded } @@ -34,6 +34,6 @@ entry: %bigbase1 = bitcast i8* %base to i16* store i16 -1, i16* %bigbase1 - %loaded = load i8* %sel + %loaded = load i8, i8* %sel ret i8 %loaded } diff --git a/llvm/test/Analysis/BasicAA/no-escape-call.ll b/llvm/test/Analysis/BasicAA/no-escape-call.ll index 072575cb2b39..ea335325170a 100644 --- a/llvm/test/Analysis/BasicAA/no-escape-call.ll +++ b/llvm/test/Analysis/BasicAA/no-escape-call.ll @@ -12,9 +12,9 @@ entry: store i8* %tmp2, i8** %tmp4, align 4 %tmp10 = getelementptr i8, i8* %tmp2, i32 10 ; [#uses=1] store i8 42, i8* %tmp10, align 1 - %tmp14 = load i8** %tmp4, align 4 ; [#uses=1] + %tmp14 = load i8*, i8** %tmp4, align 4 ; [#uses=1] %tmp16 = getelementptr i8, i8* %tmp14, i32 10 ; [#uses=1] - %tmp17 = load i8* %tmp16, align 1 ; [#uses=1] + %tmp17 = load i8, i8* %tmp16, align 1 ; [#uses=1] %tmp19 = icmp eq i8 %tmp17, 42 ; [#uses=1] ret i1 %tmp19 } diff --git a/llvm/test/Analysis/BasicAA/noalias-bugs.ll b/llvm/test/Analysis/BasicAA/noalias-bugs.ll index 2ae7660989df..acb230c45de4 100644 --- a/llvm/test/Analysis/BasicAA/noalias-bugs.ll +++ b/llvm/test/Analysis/BasicAA/noalias-bugs.ll @@ -27,7 +27,7 @@ define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2, ; CHECK; store i64 1 store i64 2, i64* %ptr.64, align 8 - %r = load i64* %either_ptr.64, align 8 + %r = load i64, i64* %either_ptr.64, align 8 store i64 1, i64* %ptr.64, align 8 ret i64 %r } diff --git a/llvm/test/Analysis/BasicAA/noalias-param.ll b/llvm/test/Analysis/BasicAA/noalias-param.ll index 6494771fc59f..c5b1ebf1a1d2 100644 --- a/llvm/test/Analysis/BasicAA/noalias-param.ll +++ b/llvm/test/Analysis/BasicAA/noalias-param.ll @@ -6,7 +6,7 @@ define void @no(i32* noalias %a, i32* %b) nounwind { entry: store i32 1, i32* %a %cap = call i32* @captures(i32* %a) nounwind readonly - %l = load i32* %b + %l = load i32, i32* %b ret void } @@ -16,7 +16,7 @@ define void @yes(i32* %c, i32* %d) nounwind { entry: store i32 1, i32* %c %cap = call i32* @captures(i32* %c) nounwind readonly - %l = load i32* %d + %l = load 
i32, i32* %d ret void } diff --git a/llvm/test/Analysis/BasicAA/nocapture.ll b/llvm/test/Analysis/BasicAA/nocapture.ll index ffc0a09a078d..26cb69bffcc0 100644 --- a/llvm/test/Analysis/BasicAA/nocapture.ll +++ b/llvm/test/Analysis/BasicAA/nocapture.ll @@ -6,9 +6,9 @@ define i32 @test2() { ; CHECK: ret i32 0 %P = alloca i32 %Q = call i32* @test(i32* %P) - %a = load i32* %P + %a = load i32, i32* %P store i32 4, i32* %Q ;; cannot clobber P since it is nocapture. - %b = load i32* %P + %b = load i32, i32* %P %c = sub i32 %a, %b ret i32 %c } @@ -19,7 +19,7 @@ define i32 @test4(i32* noalias nocapture %p) nounwind { ; CHECK: call void @test3 ; CHECK: store i32 0, i32* %p ; CHECK: store i32 1, i32* %x -; CHECK: %y = load i32* %p +; CHECK: %y = load i32, i32* %p ; CHECK: ret i32 %y entry: %q = alloca i32* @@ -27,10 +27,10 @@ entry: ; attribute since the copy doesn't outlive the function. call void @test3(i32** %q, i32* %p) nounwind store i32 0, i32* %p - %x = load i32** %q + %x = load i32*, i32** %q ; This store might write to %p and so we can't eliminate the subsequent ; load store i32 1, i32* %x - %y = load i32* %p + %y = load i32, i32* %p ret i32 %y } diff --git a/llvm/test/Analysis/BasicAA/phi-aa.ll b/llvm/test/Analysis/BasicAA/phi-aa.ll index 1b3341ef1092..3944e9e43566 100644 --- a/llvm/test/Analysis/BasicAA/phi-aa.ll +++ b/llvm/test/Analysis/BasicAA/phi-aa.ll @@ -25,9 +25,9 @@ bb1: bb2: %P = phi i32* [ @X, %bb ], [ @Y, %bb1 ] - %tmp1 = load i32* @Z, align 4 + %tmp1 = load i32, i32* @Z, align 4 store i32 123, i32* %P, align 4 - %tmp2 = load i32* @Z, align 4 + %tmp2 = load i32, i32* @Z, align 4 br label %return return: @@ -52,14 +52,14 @@ codeRepl: br i1 %targetBlock, label %for.body, label %bye for.body: - %1 = load i32* %jj7, align 4 + %1 = load i32, i32* %jj7, align 4 %idxprom4 = zext i32 %1 to i64 %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom4 - %2 = load i32* %arrayidx5, align 4 + %2 = load i32, i32* %arrayidx5, align 4 %sub6 = sub i32 %2, 6 store i32 %sub6, i32* %arrayidx5, align 4 ; %0 and %arrayidx5 can alias! It is not safe to DSE the above store. 
- %3 = load i32* %0, align 4 + %3 = load i32, i32* %0, align 4 store i32 %3, i32* %arrayidx5, align 4 %sub11 = add i32 %1, -1 %idxprom12 = zext i32 %sub11 to i64 @@ -68,7 +68,7 @@ for.body: br label %codeRepl bye: - %.reload = load i32* %jj7, align 4 + %.reload = load i32, i32* %jj7, align 4 ret i32 %.reload } diff --git a/llvm/test/Analysis/BasicAA/phi-spec-order.ll b/llvm/test/Analysis/BasicAA/phi-spec-order.ll index 0d1a6f44ec5d..30aff8c5a48d 100644 --- a/llvm/test/Analysis/BasicAA/phi-spec-order.ll +++ b/llvm/test/Analysis/BasicAA/phi-spec-order.ll @@ -24,20 +24,20 @@ for.body4: ; preds = %for.body4, %for.con %lsr.iv46 = bitcast [16000 x double]* %lsr.iv4 to <4 x double>* %lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>* %scevgep11 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -2 - %i6 = load <4 x double>* %scevgep11, align 32 + %i6 = load <4 x double>, <4 x double>* %scevgep11, align 32 %add = fadd <4 x double> %i6, store <4 x double> %add, <4 x double>* %lsr.iv12, align 32 %scevgep10 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -1 - %i7 = load <4 x double>* %scevgep10, align 32 + %i7 = load <4 x double>, <4 x double>* %scevgep10, align 32 %add.4 = fadd <4 x double> %i7, %scevgep9 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 1 store <4 x double> %add.4, <4 x double>* %scevgep9, align 32 - %i8 = load <4 x double>* %lsr.iv46, align 32 + %i8 = load <4 x double>, <4 x double>* %lsr.iv46, align 32 %add.8 = fadd <4 x double> %i8, %scevgep8 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 2 store <4 x double> %add.8, <4 x double>* %scevgep8, align 32 %scevgep7 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 1 - %i9 = load <4 x double>* %scevgep7, align 32 + %i9 = load <4 x double>, <4 x double>* %scevgep7, align 32 %add.12 = fadd <4 x double> %i9, %scevgep3 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 3 store <4 x double> %add.12, <4 x double>* %scevgep3, align 32 diff --git a/llvm/test/Analysis/BasicAA/phi-speculation.ll b/llvm/test/Analysis/BasicAA/phi-speculation.ll index 8965056f95d6..ed0d49bc0994 100644 --- a/llvm/test/Analysis/BasicAA/phi-speculation.ll +++ b/llvm/test/Analysis/BasicAA/phi-speculation.ll @@ -17,10 +17,10 @@ while.body: %ptr2_phi = phi i32* [ %ptr2, %entry ], [ %ptr2_inc, %while.body ] %result.09 = phi i32 [ 0 , %entry ], [ %add, %while.body ] %dec = add nsw i32 %num, -1 - %0 = load i32* %ptr_phi, align 4 + %0 = load i32, i32* %ptr_phi, align 4 store i32 %0, i32* %ptr2_phi, align 4 - %1 = load i32* %coeff, align 4 - %2 = load i32* %ptr_phi, align 4 + %1 = load i32, i32* %coeff, align 4 + %2 = load i32, i32* %ptr_phi, align 4 %mul = mul nsw i32 %1, %2 %add = add nsw i32 %mul, %result.09 %tobool = icmp eq i32 %dec, 0 @@ -52,10 +52,10 @@ while.body: %ptr2_phi = phi i32* [ %ptr_outer_phi2, %outer.while.header ], [ %ptr2_inc, %while.body ] %result.09 = phi i32 [ 0 , %outer.while.header ], [ %add, %while.body ] %dec = add nsw i32 %num, -1 - %0 = load i32* %ptr_phi, align 4 + %0 = load i32, i32* %ptr_phi, align 4 store i32 %0, i32* %ptr2_phi, align 4 - %1 = load i32* %coeff, align 4 - %2 = load i32* %ptr_phi, align 4 + %1 = load i32, i32* %coeff, align 4 + %2 = load i32, i32* %ptr_phi, align 4 %mul = mul nsw i32 %1, %2 %add = add nsw i32 %mul, %result.09 %tobool = icmp eq i32 %dec, 0 diff --git a/llvm/test/Analysis/BasicAA/pr18573.ll b/llvm/test/Analysis/BasicAA/pr18573.ll index 25f9d94d4dc6..ea5e4a2082f3 100644 --- a/llvm/test/Analysis/BasicAA/pr18573.ll +++ 
b/llvm/test/Analysis/BasicAA/pr18573.ll @@ -10,7 +10,7 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, ; Function Attrs: nounwind define <8 x float> @foo1(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 { allocas: - %vix = load <8 x i32>* %vix.ptr, align 4 + %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4 %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4 %v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> , i8 1) #2 @@ -31,7 +31,7 @@ allocas: ; Function Attrs: nounwind define <8 x float> @foo2(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 { allocas: - %vix = load <8 x i32>* %vix.ptr, align 4 + %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4 %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4 %v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> , i8 1) #2 diff --git a/llvm/test/Analysis/BasicAA/store-promote.ll b/llvm/test/Analysis/BasicAA/store-promote.ll index bb4258ff12f5..afe11c2a1488 100644 --- a/llvm/test/Analysis/BasicAA/store-promote.ll +++ b/llvm/test/Analysis/BasicAA/store-promote.ll @@ -10,11 +10,11 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1 @C = global [2 x i32] [ i32 4, i32 8 ] ; <[2 x i32]*> [#uses=2] define i32 @test1(i1 %c) { - %Atmp = load i32* @A ; [#uses=2] + %Atmp = load i32, i32* @A ; [#uses=2] br label %Loop Loop: ; preds = %Loop, %0 - %ToRemove = load i32* @A ; [#uses=1] + %ToRemove = load i32, i32* @A ; [#uses=1] store i32 %Atmp, i32* @B br i1 %c, label %Out, label %Loop @@ -24,7 +24,7 @@ Out: ; preds = %Loop ; The Loop block should be empty after the load/store are promoted. 
; CHECK: @test1 -; CHECK: load i32* @A +; CHECK: load i32, i32* @A ; CHECK: Loop: ; CHECK-NEXT: br i1 %c, label %Out, label %Loop ; CHECK: Out: @@ -35,10 +35,10 @@ define i32 @test2(i1 %c) { br label %Loop Loop: ; preds = %Loop, %0 - %AVal = load i32* @A ; [#uses=2] + %AVal = load i32, i32* @A ; [#uses=2] %C0 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 0 ; [#uses=1] store i32 %AVal, i32* %C0 - %BVal = load i32* @B ; [#uses=2] + %BVal = load i32, i32* @B ; [#uses=2] %C1 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 1 ; [#uses=1] store i32 %BVal, i32* %C1 br i1 %c, label %Out, label %Loop diff --git a/llvm/test/Analysis/BasicAA/tailcall-modref.ll b/llvm/test/Analysis/BasicAA/tailcall-modref.ll index ebeb28c11310..5857e684bc57 100644 --- a/llvm/test/Analysis/BasicAA/tailcall-modref.ll +++ b/llvm/test/Analysis/BasicAA/tailcall-modref.ll @@ -4,9 +4,9 @@ define i32 @test() { ; CHECK: ret i32 0 %A = alloca i32 ; [#uses=3] call void @foo( i32* %A ) - %X = load i32* %A ; [#uses=1] + %X = load i32, i32* %A ; [#uses=1] tail call void @bar( ) - %Y = load i32* %A ; [#uses=1] + %Y = load i32, i32* %A ; [#uses=1] %Z = sub i32 %X, %Y ; [#uses=1] ret i32 %Z } diff --git a/llvm/test/Analysis/BasicAA/underlying-value.ll b/llvm/test/Analysis/BasicAA/underlying-value.ll index b0d22612e985..0cfbdb893b30 100644 --- a/llvm/test/Analysis/BasicAA/underlying-value.ll +++ b/llvm/test/Analysis/BasicAA/underlying-value.ll @@ -15,9 +15,9 @@ for.cond2: ; preds = %for.body5, %for.con for.body5: ; preds = %for.cond2 %arrayidx = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 0 - %tmp7 = load i64* %arrayidx, align 8 + %tmp7 = load i64, i64* %arrayidx, align 8 %arrayidx9 = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 undef - %tmp10 = load i64* %arrayidx9, align 8 + %tmp10 = load i64, i64* %arrayidx9, align 8 br label %for.cond2 for.end22: ; preds = %for.cond diff --git a/llvm/test/Analysis/BasicAA/zext.ll b/llvm/test/Analysis/BasicAA/zext.ll index bf35a5205745..ed3565640251 100644 --- a/llvm/test/Analysis/BasicAA/zext.ll +++ b/llvm/test/Analysis/BasicAA/zext.ll @@ -112,7 +112,7 @@ for.loop.exit: define void @test_spec2006() { %h = alloca [1 x [2 x i32*]], align 16 - %d.val = load i32* @d, align 4 + %d.val = load i32, i32* @d, align 4 %d.promoted = sext i32 %d.val to i64 %1 = icmp slt i32 %d.val, 2 br i1 %1, label %.lr.ph, label %3 @@ -168,7 +168,7 @@ for.loop.exit: define void @test_modulo_analysis_with_global() { %h = alloca [1 x [2 x i32*]], align 16 - %b = load i32* @b, align 4 + %b = load i32, i32* @b, align 4 %b.promoted = sext i32 %b to i64 br label %for.loop diff --git a/llvm/test/Analysis/BlockFrequencyInfo/basic.ll b/llvm/test/Analysis/BlockFrequencyInfo/basic.ll index 8701bbde8aec..728adf007f42 100644 --- a/llvm/test/Analysis/BlockFrequencyInfo/basic.ll +++ b/llvm/test/Analysis/BlockFrequencyInfo/basic.ll @@ -13,7 +13,7 @@ body: %iv = phi i32 [ 0, %entry ], [ %next, %body ] %base = phi i32 [ 0, %entry ], [ %sum, %body ] %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv - %0 = load i32* %arrayidx + %0 = load i32, i32* %arrayidx %sum = add nsw i32 %0, %base %next = add i32 %iv, 1 %exitcond = icmp eq i32 %next, %i diff --git a/llvm/test/Analysis/BranchProbabilityInfo/basic.ll b/llvm/test/Analysis/BranchProbabilityInfo/basic.ll index 29cfc4e2c8f0..0f669119bfea 100644 --- a/llvm/test/Analysis/BranchProbabilityInfo/basic.ll +++ b/llvm/test/Analysis/BranchProbabilityInfo/basic.ll @@ -10,7 +10,7 @@ body: %iv = phi i32 [ 0, %entry ], [ %next, %body ] %base = phi i32 [ 
0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -154,7 +154,7 @@ define i32 @test_cold_call_sites(i32* %a) {
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
@@ -165,7 +165,7 @@ then:
else:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
%val3 = call i32 @regular_function(i32 %val2)
br label %exit
diff --git a/llvm/test/Analysis/BranchProbabilityInfo/loop.ll b/llvm/test/Analysis/BranchProbabilityInfo/loop.ll
index d072778701c3..e792790f84f8 100644
--- a/llvm/test/Analysis/BranchProbabilityInfo/loop.ll
+++ b/llvm/test/Analysis/BranchProbabilityInfo/loop.ll
@@ -88,7 +88,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %if.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %do.body1, label %if.end
; CHECK: edge do.body -> do.body1 probability is 16 / 32 = 50%
@@ -124,7 +124,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %do.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.body1
; CHECK: edge do.body -> return probability is 4 / 128
@@ -169,7 +169,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %if.end ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %if.end
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -214,7 +214,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %do.cond ]
call void @g2()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.cond
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -258,7 +258,7 @@ for.body.lr.ph:
for.body:
%i.011 = phi i32 [ 0, %for.body.lr.ph ], [ %inc6, %for.inc5 ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp1 = icmp eq i32 %0, %i.011
br i1 %cmp1, label %for.inc5, label %if.end
; CHECK: edge for.body -> for.inc5 probability is 16 / 32 = 50%
@@ -319,21 +319,21 @@ for.body:
for.body3:
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp4 = icmp eq i32 %0, %j.017
br i1 %cmp4, label %for.inc, label %if.end
; CHECK: edge for.body3 -> for.inc probability is 16 / 32 = 50%
; CHECK: edge for.body3 -> if.end probability is 16 / 32 = 50%
if.end:
- %1 = load i32* %arrayidx5, align 4
+ %1 = load i32, i32* %arrayidx5, align 4
%cmp6 = icmp eq i32 %1, %j.017
br i1 %cmp6, label %for.inc, label %if.end8
; CHECK: edge if.end -> for.inc probability is 16 / 32 = 50%
; CHECK: edge if.end -> if.end8 probability is 16 / 32 = 50%
if.end8:
- %2 = load i32* %arrayidx9, align 4
+ %2 = load i32, i32* %arrayidx9, align 4
%cmp10 = icmp eq i32 %2, %j.017
br i1 %cmp10, label %for.inc, label %if.end12
; CHECK: edge if.end8 -> for.inc probability is 16 / 32 = 50%
diff --git a/llvm/test/Analysis/BranchProbabilityInfo/pr18705.ll b/llvm/test/Analysis/BranchProbabilityInfo/pr18705.ll
index fa300d155350..aff08a63d506 100644
--- a/llvm/test/Analysis/BranchProbabilityInfo/pr18705.ll
+++ b/llvm/test/Analysis/BranchProbabilityInfo/pr18705.ll
@@ -23,22 +23,22 @@ while.body:
%c.addr.09 = phi i32* [ %c, %while.body.lr.ph ], [ %c.addr.1, %if.end ]
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%arrayidx = getelementptr inbounds float, float* %f0, i64 %indvars.iv.next
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %f1, i64 %indvars.iv.next
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%cmp = fcmp une float %1, %2
br i1 %cmp, label %if.then, label %if.else
if.then:
%incdec.ptr = getelementptr inbounds i32, i32* %b.addr.011, i64 1
- %3 = load i32* %b.addr.011, align 4
+ %3 = load i32, i32* %b.addr.011, align 4
%add = add nsw i32 %3, 12
store i32 %add, i32* %b.addr.011, align 4
br label %if.end
if.else:
%incdec.ptr3 = getelementptr inbounds i32, i32* %c.addr.09, i64 1
- %4 = load i32* %c.addr.09, align 4
+ %4 = load i32, i32* %c.addr.09, align 4
%sub = add nsw i32 %4, -13
store i32 %sub, i32* %c.addr.09, align 4
br label %if.end
diff --git a/llvm/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll b/llvm/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
index 245a0607be62..adacf048d678 100644
--- a/llvm/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
+++ b/llvm/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
@@ -22,11 +22,11 @@ entry:
%u = alloca %union.anon, align 8
%tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
- %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
+ %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
- %tmp5 = load i32* %arrayidx, align 4, !tbaa !3
+ %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
}
diff --git a/llvm/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll b/llvm/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
index eeb423740e6e..c2fcf32ce06b 100644
--- a/llvm/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
+++ b/llvm/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
@@ -11,9 +11,9 @@ define i32 @test(i32 %indvar) nounwind {
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
%t.5 = getelementptr i32, i32* %tab, i32 %tmp32
- %loada = load i32* %tab
+ %loada = load i32, i32* %tab
store i32 0, i32* %t.5
- %loadb = load i32* %tab
+ %loadb = load i32, i32* %tab
%rval = add i32 %loada, %loadb
ret i32 %rval
}
diff --git a/llvm/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll b/llvm/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
index 9bbc721a7b16..e997374e92d9 100644
--- a/llvm/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
+++ b/llvm/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
@@ -25,7 +25,7 @@ define void @test(i1 %C) {
store %T* %MS, %T** %M
- %AP = load %T** %M ; PartialAlias with %A, %B
+ %AP = load %T*, %T** %M ; PartialAlias with %A, %B
ret void
}
diff --git a/llvm/test/Analysis/CFLAliasAnalysis/multilevel.ll b/llvm/test/Analysis/CFLAliasAnalysis/multilevel.ll
index 9c9eb9a49779..d42dca442eab 100644
--- a/llvm/test/Analysis/CFLAliasAnalysis/multilevel.ll
+++ b/llvm/test/Analysis/CFLAliasAnalysis/multilevel.ll
@@ -23,8 +23,8 @@ define void @test() {
store %T* %A, %T** %M
store %T* %B, %T** %N
- %AP = load %T** %M ; PartialAlias with %A
- %BP = load %T** %N ;
PartialAlias with %B + %AP = load %T*, %T** %M ; PartialAlias with %A + %BP = load %T*, %T** %N ; PartialAlias with %B ret void } diff --git a/llvm/test/Analysis/CFLAliasAnalysis/must-and-partial.ll b/llvm/test/Analysis/CFLAliasAnalysis/must-and-partial.ll index bf1e66c91990..9deacf860ed8 100644 --- a/llvm/test/Analysis/CFLAliasAnalysis/must-and-partial.ll +++ b/llvm/test/Analysis/CFLAliasAnalysis/must-and-partial.ll @@ -21,7 +21,7 @@ green: %bigbase0 = bitcast i8* %base to i16* store i16 -1, i16* %bigbase0 - %loaded = load i8* %phi + %loaded = load i8, i8* %phi ret i8 %loaded } @@ -37,7 +37,7 @@ entry: %bigbase1 = bitcast i8* %base to i16* store i16 -1, i16* %bigbase1 - %loaded = load i8* %sel + %loaded = load i8, i8* %sel ret i8 %loaded } @@ -46,9 +46,9 @@ entry: ; CHECK: MayAlias: double* %A, double* %Index define void @testr2(double* nocapture readonly %A, double* nocapture readonly %Index) { %arrayidx22 = getelementptr inbounds double, double* %Index, i64 2 - %1 = load double* %arrayidx22 + %1 = load double, double* %arrayidx22 %arrayidx25 = getelementptr inbounds double, double* %A, i64 2 - %2 = load double* %arrayidx25 + %2 = load double, double* %arrayidx25 %mul26 = fmul double %1, %2 ret void } diff --git a/llvm/test/Analysis/CostModel/AArch64/store.ll b/llvm/test/Analysis/CostModel/AArch64/store.ll index 0c9883cf2a2f..307f8f8ee974 100644 --- a/llvm/test/Analysis/CostModel/AArch64/store.ll +++ b/llvm/test/Analysis/CostModel/AArch64/store.ll @@ -14,9 +14,9 @@ define void @store() { ; CHECK: cost of 64 {{.*}} store store <4 x i8> undef, <4 x i8> * undef ; CHECK: cost of 16 {{.*}} load - load <2 x i8> * undef + load <2 x i8> , <2 x i8> * undef ; CHECK: cost of 64 {{.*}} load - load <4 x i8> * undef + load <4 x i8> , <4 x i8> * undef ret void } diff --git a/llvm/test/Analysis/CostModel/ARM/insertelement.ll b/llvm/test/Analysis/CostModel/ARM/insertelement.ll index f951b08f9baa..bd1467ef4a56 100644 --- a/llvm/test/Analysis/CostModel/ARM/insertelement.ll +++ b/llvm/test/Analysis/CostModel/ARM/insertelement.ll @@ -10,8 +10,8 @@ target triple = "thumbv7-apple-ios6.0.0" ; CHECK: insertelement_i8 define void @insertelement_i8(%T_i8* %saddr, %T_i8v* %vaddr) { - %v0 = load %T_i8v* %vaddr - %v1 = load %T_i8* %saddr + %v0 = load %T_i8v, %T_i8v* %vaddr + %v1 = load %T_i8, %T_i8* %saddr ;CHECK: estimated cost of 3 for {{.*}} insertelement <8 x i8> %v2 = insertelement %T_i8v %v0, %T_i8 %v1, i32 1 store %T_i8v %v2, %T_i8v* %vaddr @@ -24,8 +24,8 @@ define void @insertelement_i8(%T_i8* %saddr, ; CHECK: insertelement_i16 define void @insertelement_i16(%T_i16* %saddr, %T_i16v* %vaddr) { - %v0 = load %T_i16v* %vaddr - %v1 = load %T_i16* %saddr + %v0 = load %T_i16v, %T_i16v* %vaddr + %v1 = load %T_i16, %T_i16* %saddr ;CHECK: estimated cost of 3 for {{.*}} insertelement <4 x i16> %v2 = insertelement %T_i16v %v0, %T_i16 %v1, i32 1 store %T_i16v %v2, %T_i16v* %vaddr @@ -37,8 +37,8 @@ define void @insertelement_i16(%T_i16* %saddr, ; CHECK: insertelement_i32 define void @insertelement_i32(%T_i32* %saddr, %T_i32v* %vaddr) { - %v0 = load %T_i32v* %vaddr - %v1 = load %T_i32* %saddr + %v0 = load %T_i32v, %T_i32v* %vaddr + %v1 = load %T_i32, %T_i32* %saddr ;CHECK: estimated cost of 3 for {{.*}} insertelement <2 x i32> %v2 = insertelement %T_i32v %v0, %T_i32 %v1, i32 1 store %T_i32v %v2, %T_i32v* %vaddr diff --git a/llvm/test/Analysis/CostModel/PowerPC/load_store.ll b/llvm/test/Analysis/CostModel/PowerPC/load_store.ll index 368f0a73489b..1e50f1651e0a 100644 --- a/llvm/test/Analysis/CostModel/PowerPC/load_store.ll 
+++ b/llvm/test/Analysis/CostModel/PowerPC/load_store.ll @@ -19,26 +19,26 @@ define i32 @stores(i32 %arg) { } define i32 @loads(i32 %arg) { ; CHECK: cost of 1 {{.*}} load - load i8* undef, align 4 + load i8, i8* undef, align 4 ; CHECK: cost of 1 {{.*}} load - load i16* undef, align 4 + load i16, i16* undef, align 4 ; CHECK: cost of 1 {{.*}} load - load i32* undef, align 4 + load i32, i32* undef, align 4 ; CHECK: cost of 2 {{.*}} load - load i64* undef, align 4 + load i64, i64* undef, align 4 ; CHECK: cost of 4 {{.*}} load - load i128* undef, align 4 + load i128, i128* undef, align 4 ; FIXME: There actually are sub-vector Altivec loads, and so we could handle ; this with a small expense, but we don't currently. ; CHECK: cost of 48 {{.*}} load - load <4 x i16>* undef, align 2 + load <4 x i16>, <4 x i16>* undef, align 2 ; CHECK: cost of 1 {{.*}} load - load <4 x i32>* undef, align 4 + load <4 x i32>, <4 x i32>* undef, align 4 ; CHECK: cost of 46 {{.*}} load - load <3 x float>* undef, align 1 + load <3 x float>, <3 x float>* undef, align 1 ret i32 undef } diff --git a/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll b/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll index 196488142485..cbe409d7f475 100644 --- a/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll +++ b/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll @@ -11,7 +11,7 @@ vector.body: ; preds = %vector.body, %vecto %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] %0 = getelementptr inbounds float, float* %f, i64 %index %1 = bitcast float* %0 to <4 x float>* - %wide.load = load <4 x float>* %1, align 4 + %wide.load = load <4 x float>, <4 x float>* %1, align 4 %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load) store <4 x float> %2, <4 x float>* %1, align 4 %index.next = add i64 %index, 4 @@ -39,7 +39,7 @@ vector.body: ; preds = %vector.body, %vecto %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] %0 = getelementptr inbounds float, float* %f, i64 %index %1 = bitcast float* %0 to <4 x float>* - %wide.load = load <4 x float>* %1, align 4 + %wide.load = load <4 x float>, <4 x float>* %1, align 4 %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load) store <4 x float> %2, <4 x float>* %1, align 4 %index.next = add i64 %index, 4 @@ -67,7 +67,7 @@ vector.body: ; preds = %vector.body, %vecto %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] %0 = getelementptr inbounds float, float* %f, i64 %index %1 = bitcast float* %0 to <4 x float>* - %wide.load = load <4 x float>* %1, align 4 + %wide.load = load <4 x float>, <4 x float>* %1, align 4 %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c) store <4 x float> %2, <4 x float>* %1, align 4 %index.next = add i64 %index, 4 diff --git a/llvm/test/Analysis/CostModel/X86/load_store.ll b/llvm/test/Analysis/CostModel/X86/load_store.ll index a53d0bd4e993..ccf110a3a371 100644 --- a/llvm/test/Analysis/CostModel/X86/load_store.ll +++ b/llvm/test/Analysis/CostModel/X86/load_store.ll @@ -34,49 +34,49 @@ define i32 @stores(i32 %arg) { } define i32 @loads(i32 %arg) { ;CHECK: cost of 1 {{.*}} load - load i8* undef, align 4 + load i8, i8* undef, align 4 ;CHECK: cost of 1 {{.*}} load - load i16* undef, align 4 + load i16, i16* undef, align 4 ;CHECK: cost of 1 {{.*}} load - load i32* undef, align 4 + load i32, i32* undef, align 4 ;CHECK: cost of 1 {{.*}} load - load i64* undef, align 4 + load i64, i64* undef, align 4 ;CHECK: cost of 2 {{.*}} load - load i128* undef, align 4 + load i128, i128* 
undef, align 4 ;CHECK: cost of 1 {{.*}} load - load <2 x i32>* undef, align 4 + load <2 x i32>, <2 x i32>* undef, align 4 ;CHECK: cost of 1 {{.*}} load - load <4 x i32>* undef, align 4 + load <4 x i32>, <4 x i32>* undef, align 4 ;CHECK: cost of 2 {{.*}} load - load <8 x i32>* undef, align 4 + load <8 x i32>, <8 x i32>* undef, align 4 ;CHECK: cost of 1 {{.*}} load - load <2 x i64>* undef, align 4 + load <2 x i64>, <2 x i64>* undef, align 4 ;CHECK: cost of 2 {{.*}} load - load <4 x i64>* undef, align 4 + load <4 x i64>, <4 x i64>* undef, align 4 ;CHECK: cost of 4 {{.*}} load - load <8 x i64>* undef, align 4 + load <8 x i64>, <8 x i64>* undef, align 4 ;CHECK: cost of 3 {{.*}} load - load <3 x float>* undef, align 4 + load <3 x float>, <3 x float>* undef, align 4 ;CHECK: cost of 3 {{.*}} load - load <3 x double>* undef, align 4 + load <3 x double>, <3 x double>* undef, align 4 ;CHECK: cost of 3 {{.*}} load - load <3 x i32>* undef, align 4 + load <3 x i32>, <3 x i32>* undef, align 4 ;CHECK: cost of 3 {{.*}} load - load <3 x i64>* undef, align 4 + load <3 x i64>, <3 x i64>* undef, align 4 ;CHECK: cost of 10 {{.*}} load - load <5 x i32>* undef, align 4 + load <5 x i32>, <5 x i32>* undef, align 4 ;CHECK: cost of 10 {{.*}} load - load <5 x i64>* undef, align 4 + load <5 x i64>, <5 x i64>* undef, align 4 ret i32 undef } diff --git a/llvm/test/Analysis/CostModel/X86/loop_v2.ll b/llvm/test/Analysis/CostModel/X86/loop_v2.ll index bd565128835f..9283310e6003 100644 --- a/llvm/test/Analysis/CostModel/X86/loop_v2.ll +++ b/llvm/test/Analysis/CostModel/X86/loop_v2.ll @@ -12,7 +12,7 @@ vector.body: ; preds = %vector.body, %vecto %vec.phi = phi <2 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ] %0 = getelementptr inbounds i32, i32* %A, i64 %index %1 = bitcast i32* %0 to <2 x i32>* - %2 = load <2 x i32>* %1, align 4 + %2 = load <2 x i32>, <2 x i32>* %1, align 4 %3 = sext <2 x i32> %2 to <2 x i64> ;CHECK: cost of 1 {{.*}} extract %4 = extractelement <2 x i64> %3, i32 0 @@ -20,10 +20,10 @@ vector.body: ; preds = %vector.body, %vecto ;CHECK: cost of 1 {{.*}} extract %6 = extractelement <2 x i64> %3, i32 1 %7 = getelementptr inbounds i32, i32* %A, i64 %6 - %8 = load i32* %5, align 4 + %8 = load i32, i32* %5, align 4 ;CHECK: cost of 1 {{.*}} insert %9 = insertelement <2 x i32> undef, i32 %8, i32 0 - %10 = load i32* %7, align 4 + %10 = load i32, i32* %7, align 4 ;CHECK: cost of 1 {{.*}} insert %11 = insertelement <2 x i32> %9, i32 %10, i32 1 %12 = add nsw <2 x i32> %11, %vec.phi diff --git a/llvm/test/Analysis/CostModel/X86/vectorized-loop.ll b/llvm/test/Analysis/CostModel/X86/vectorized-loop.ll index a311f7273d7a..2dd52a00782e 100644 --- a/llvm/test/Analysis/CostModel/X86/vectorized-loop.ll +++ b/llvm/test/Analysis/CostModel/X86/vectorized-loop.ll @@ -29,13 +29,13 @@ vector.body: ; preds = %for.body.lr.ph, %ve ;CHECK: cost of 0 {{.*}} bitcast %5 = bitcast i32* %4 to <8 x i32>* ;CHECK: cost of 2 {{.*}} load - %6 = load <8 x i32>* %5, align 4 + %6 = load <8 x i32>, <8 x i32>* %5, align 4 ;CHECK: cost of 4 {{.*}} mul %7 = mul nsw <8 x i32> %6, %8 = getelementptr inbounds i32, i32* %A, i64 %index %9 = bitcast i32* %8 to <8 x i32>* ;CHECK: cost of 2 {{.*}} load - %10 = load <8 x i32>* %9, align 4 + %10 = load <8 x i32>, <8 x i32>* %9, align 4 ;CHECK: cost of 4 {{.*}} add %11 = add nsw <8 x i32> %10, %7 ;CHECK: cost of 2 {{.*}} store @@ -54,12 +54,12 @@ for.body: ; preds = %middle.block, %for. 
%13 = add nsw i64 %indvars.iv, 2 %arrayidx = getelementptr inbounds i32, i32* %B, i64 %13 ;CHECK: cost of 1 {{.*}} load - %14 = load i32* %arrayidx, align 4 + %14 = load i32, i32* %arrayidx, align 4 ;CHECK: cost of 1 {{.*}} mul %mul = mul nsw i32 %14, 5 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv ;CHECK: cost of 1 {{.*}} load - %15 = load i32* %arrayidx2, align 4 + %15 = load i32, i32* %arrayidx2, align 4 %add3 = add nsw i32 %15, %mul store i32 %add3, i32* %arrayidx2, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 diff --git a/llvm/test/Analysis/Delinearization/gcd_multiply_expr.ll b/llvm/test/Analysis/Delinearization/gcd_multiply_expr.ll index e1db0d249297..c30a672b840b 100644 --- a/llvm/test/Analysis/Delinearization/gcd_multiply_expr.ll +++ b/llvm/test/Analysis/Delinearization/gcd_multiply_expr.ll @@ -27,7 +27,7 @@ define i32 @fn2() { entry: - %.pr = load i32* @d, align 4 + %.pr = load i32, i32* @d, align 4 %phitmp = icmp eq i32 %.pr, 0 br label %for.cond @@ -36,11 +36,11 @@ for.cond: br i1 %0, label %for.cond, label %for.cond2thread-pre-split.preheader.i for.cond2thread-pre-split.preheader.i: - %1 = load i32* @g, align 4 - %2 = load i32* @h, align 4 + %1 = load i32, i32* @g, align 4 + %2 = load i32, i32* @h, align 4 %mul = mul nsw i32 %2, %1 - %3 = load i8** @f, align 4 - %.pr.pre.i = load i32* @b, align 4 + %3 = load i8*, i8** @f, align 4 + %.pr.pre.i = load i32, i32* @b, align 4 br label %for.cond2thread-pre-split.i for.cond2thread-pre-split.i: @@ -65,56 +65,56 @@ for.body4.i: %8 = phi i32 [ %inc.7.i, %for.body4.i ], [ %.pr.i, %for.body4.i.preheader ] %arrayidx.sum1 = add i32 %add.i, %8 %arrayidx.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum1 - %9 = load i8* %arrayidx.i, align 1 + %9 = load i8, i8* %arrayidx.i, align 1 %conv.i = sext i8 %9 to i32 store i32 %conv.i, i32* @c, align 4 %inc.i = add nsw i32 %8, 1 store i32 %inc.i, i32* @b, align 4 %arrayidx.sum2 = add i32 %add.i, %inc.i %arrayidx.1.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum2 - %10 = load i8* %arrayidx.1.i, align 1 + %10 = load i8, i8* %arrayidx.1.i, align 1 %conv.1.i = sext i8 %10 to i32 store i32 %conv.1.i, i32* @c, align 4 %inc.1.i = add nsw i32 %8, 2 store i32 %inc.1.i, i32* @b, align 4 %arrayidx.sum3 = add i32 %add.i, %inc.1.i %arrayidx.2.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum3 - %11 = load i8* %arrayidx.2.i, align 1 + %11 = load i8, i8* %arrayidx.2.i, align 1 %conv.2.i = sext i8 %11 to i32 store i32 %conv.2.i, i32* @c, align 4 %inc.2.i = add nsw i32 %8, 3 store i32 %inc.2.i, i32* @b, align 4 %arrayidx.sum4 = add i32 %add.i, %inc.2.i %arrayidx.3.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum4 - %12 = load i8* %arrayidx.3.i, align 1 + %12 = load i8, i8* %arrayidx.3.i, align 1 %conv.3.i = sext i8 %12 to i32 store i32 %conv.3.i, i32* @c, align 4 %inc.3.i = add nsw i32 %8, 4 store i32 %inc.3.i, i32* @b, align 4 %arrayidx.sum5 = add i32 %add.i, %inc.3.i %arrayidx.4.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum5 - %13 = load i8* %arrayidx.4.i, align 1 + %13 = load i8, i8* %arrayidx.4.i, align 1 %conv.4.i = sext i8 %13 to i32 store i32 %conv.4.i, i32* @c, align 4 %inc.4.i = add nsw i32 %8, 5 store i32 %inc.4.i, i32* @b, align 4 %arrayidx.sum6 = add i32 %add.i, %inc.4.i %arrayidx.5.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum6 - %14 = load i8* %arrayidx.5.i, align 1 + %14 = load i8, i8* %arrayidx.5.i, align 1 %conv.5.i = sext i8 %14 to i32 store i32 %conv.5.i, i32* @c, align 4 %inc.5.i = add nsw i32 %8, 6 store i32 %inc.5.i, 
i32* @b, align 4 %arrayidx.sum7 = add i32 %add.i, %inc.5.i %arrayidx.6.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum7 - %15 = load i8* %arrayidx.6.i, align 1 + %15 = load i8, i8* %arrayidx.6.i, align 1 %conv.6.i = sext i8 %15 to i32 store i32 %conv.6.i, i32* @c, align 4 %inc.6.i = add nsw i32 %8, 7 store i32 %inc.6.i, i32* @b, align 4 %arrayidx.sum8 = add i32 %add.i, %inc.6.i %arrayidx.7.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum8 - %16 = load i8* %arrayidx.7.i, align 1 + %16 = load i8, i8* %arrayidx.7.i, align 1 %conv.7.i = sext i8 %16 to i32 store i32 %conv.7.i, i32* @c, align 4 %inc.7.i = add nsw i32 %8, 8 @@ -136,7 +136,7 @@ for.body4.ur.i: %20 = phi i32 [ %inc.ur.i, %for.body4.ur.i ], [ %.ph, %for.body4.ur.i.preheader ] %arrayidx.sum = add i32 %add.i, %20 %arrayidx.ur.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum - %21 = load i8* %arrayidx.ur.i, align 1 + %21 = load i8, i8* %arrayidx.ur.i, align 1 %conv.ur.i = sext i8 %21 to i32 store i32 %conv.ur.i, i32* @c, align 4 %inc.ur.i = add nsw i32 %20, 1 diff --git a/llvm/test/Analysis/Delinearization/himeno_1.ll b/llvm/test/Analysis/Delinearization/himeno_1.ll index b2e2f955f4d9..bba7b4cb0a13 100644 --- a/llvm/test/Analysis/Delinearization/himeno_1.ll +++ b/llvm/test/Analysis/Delinearization/himeno_1.ll @@ -36,23 +36,23 @@ define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable { entry: %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2 - %p.rows = load i32* %p.rows.ptr + %p.rows = load i32, i32* %p.rows.ptr %p.rows.sub = add i32 %p.rows, -1 %p.rows.sext = sext i32 %p.rows.sub to i64 %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3 - %p.cols = load i32* %p.cols.ptr + %p.cols = load i32, i32* %p.cols.ptr %p.cols.sub = add i32 %p.cols, -1 %p.cols.sext = sext i32 %p.cols.sub to i64 %p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4 - %p.deps = load i32* %p.deps.ptr + %p.deps = load i32, i32* %p.deps.ptr %p.deps.sub = add i32 %p.deps, -1 %p.deps.sext = sext i32 %p.deps.sub to i64 %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3 - %a.cols = load i32* %a.cols.ptr + %a.cols = load i32, i32* %a.cols.ptr %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4 - %a.deps = load i32* %a.deps.ptr + %a.deps = load i32, i32* %a.deps.ptr %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0 - %a.base = load float** %a.base.ptr, align 8 + %a.base = load float*, float** %a.base.ptr, align 8 br label %for.i for.i: ; preds = %for.i.inc, %entry diff --git a/llvm/test/Analysis/Delinearization/himeno_2.ll b/llvm/test/Analysis/Delinearization/himeno_2.ll index 56662f51d845..2cf8ebc28398 100644 --- a/llvm/test/Analysis/Delinearization/himeno_2.ll +++ b/llvm/test/Analysis/Delinearization/himeno_2.ll @@ -36,25 +36,25 @@ define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable { entry: %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2 - %p.rows = load i32* %p.rows.ptr + %p.rows = load i32, i32* %p.rows.ptr %p.rows.sub = add i32 %p.rows, -1 %p.rows.sext = sext i32 %p.rows.sub to i64 %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3 - %p.cols = load i32* %p.cols.ptr + %p.cols = load i32, i32* %p.cols.ptr %p.cols.sub = add i32 %p.cols, -1 %p.cols.sext = sext i32 %p.cols.sub to i64 %p.deps.ptr = getelementptr inbounds 
%struct.Mat, %struct.Mat* %p, i64 0, i32 4 - %p.deps = load i32* %p.deps.ptr + %p.deps = load i32, i32* %p.deps.ptr %p.deps.sub = add i32 %p.deps, -1 %p.deps.sext = sext i32 %p.deps.sub to i64 %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3 - %a.cols = load i32* %a.cols.ptr + %a.cols = load i32, i32* %a.cols.ptr %a.cols.sext = sext i32 %a.cols to i64 %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4 - %a.deps = load i32* %a.deps.ptr + %a.deps = load i32, i32* %a.deps.ptr %a.deps.sext = sext i32 %a.deps to i64 %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0 - %a.base = load float** %a.base.ptr, align 8 + %a.base = load float*, float** %a.base.ptr, align 8 br label %for.i for.i: ; preds = %for.i.inc, %entry diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll index a947c07949ac..9df71093279c 100644 --- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll @@ -8,7 +8,7 @@ ; A[i][j] = 1.0; ; } -; Inst: %val = load double* %arrayidx +; Inst: %val = load double, double* %arrayidx ; In Loop with Header: for.j ; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j> ; Base offset: %A @@ -35,7 +35,7 @@ for.j: %j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ] %vlaarrayidx.sum = add i64 %j, %tmp %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum - %val = load double* %arrayidx + %val = load double, double* %arrayidx store double %val, double* %arrayidx %j.inc = add nsw i64 %j, 1 %j.exitcond = icmp eq i64 %j.inc, %m diff --git a/llvm/test/Analysis/Delinearization/undef.ll b/llvm/test/Analysis/Delinearization/undef.ll index 71bce89ec212..399ff2782aa4 100644 --- a/llvm/test/Analysis/Delinearization/undef.ll +++ b/llvm/test/Analysis/Delinearization/undef.ll @@ -21,7 +21,7 @@ for.body60: %tmp6 = mul i64 %tmp5, undef %arrayidx69.sum = add i64 undef, %tmp6 %arrayidx70 = getelementptr inbounds double, double* %Ey, i64 %arrayidx69.sum - %1 = load double* %arrayidx70, align 8 + %1 = load double, double* %arrayidx70, align 8 %inc = add nsw i64 %ix.062, 1 br i1 false, label %for.body60, label %for.end diff --git a/llvm/test/Analysis/DependenceAnalysis/Banerjee.ll b/llvm/test/Analysis/DependenceAnalysis/Banerjee.ll index 12e03bbb108c..84459b26aa56 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Banerjee.ll +++ b/llvm/test/Analysis/DependenceAnalysis/Banerjee.ll @@ -46,7 +46,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %sub = add nsw i64 %add5, -1 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub - %0 = load i64* %arrayidx6, align 8 + %0 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -115,7 +115,7 @@ for.body3: ; preds = %for.body3.preheader %add5 = add nsw i64 %mul4, %j.03 %sub = add nsw i64 %add5, -1 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub - %2 = load i64* %arrayidx6, align 8 + %2 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.12, i64 1 store i64 %2, i64* %B.addr.12, align 8 %inc = add nsw i64 %j.03, 1 @@ -181,7 +181,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %add6 = add nsw i64 %add5, 100 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %0 = load i64* 
%arrayidx7, align 8 + %0 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -240,7 +240,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %add6 = add nsw i64 %add5, 99 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %0 = load i64* %arrayidx7, align 8 + %0 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -299,7 +299,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %sub = add nsw i64 %add5, -100 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub - %0 = load i64* %arrayidx6, align 8 + %0 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -358,7 +358,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %sub = add nsw i64 %add5, -99 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub - %0 = load i64* %arrayidx6, align 8 + %0 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -417,7 +417,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %add6 = add nsw i64 %add5, 9 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %0 = load i64* %arrayidx7, align 8 + %0 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -476,7 +476,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %add6 = add nsw i64 %add5, 10 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %0 = load i64* %arrayidx7, align 8 + %0 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -535,7 +535,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %mul4, %j.02 %add6 = add nsw i64 %add5, 11 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %0 = load i64* %arrayidx7, align 8 + %0 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -595,7 +595,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = add i64 %i.03, %0 %add6 = add nsw i64 %sub, 11 %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6 - %1 = load i64* %arrayidx7, align 8 + %1 = load i64, i64* %arrayidx7, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %1, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -654,7 +654,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = add i64 %i.03, %0 %add5 = add nsw i64 %sub, 11 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5 - %1 = load i64* %arrayidx6, align 8 + %1 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %1, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -713,7 +713,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = sub nsw i64 %mul4, %j.02 %add5 = add nsw i64 %sub, 11 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5 - %0 = load i64* %arrayidx6, 
align 8 + %0 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 @@ -772,7 +772,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = sub nsw i64 %mul4, %j.02 %add5 = add nsw i64 %sub, 11 %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5 - %0 = load i64* %arrayidx6, align 8 + %0 = load i64, i64* %arrayidx6, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1 store i64 %0, i64* %B.addr.11, align 8 %inc = add nsw i64 %j.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll index 03154758266f..096add6e80be 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll +++ b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll @@ -29,7 +29,7 @@ for.body: ; preds = %entry, %for.body %add = add nsw i64 %i.02, 9 %add2 = add nsw i64 %i.02, 10 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -65,7 +65,7 @@ for.body: ; preds = %entry, %for.body %add = add nsw i64 %i.02, 9 %add2 = add nsw i64 %i.02, 9 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -103,7 +103,7 @@ for.body: ; preds = %entry, %for.body %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub store i32 %conv, i32* %arrayidx3, align 4 %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -141,7 +141,7 @@ for.body: ; preds = %entry, %for.body %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub store i32 %conv, i32* %arrayidx3, align 4 %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -180,7 +180,7 @@ for.body: ; preds = %entry, %for.body %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub3, i64 %sub store i32 %conv, i32* %arrayidx4, align 4 %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx6, align 4 + %0 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -221,7 +221,7 @@ for.body: ; preds = %entry, %for.body %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add, i64 %sub store i32 %conv, i32* %arrayidx5, align 4 %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* 
%B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -257,7 +257,7 @@ for.body: ; preds = %entry, %for.body %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub store i32 %conv, i32* %arrayidx1, align 4 %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -293,7 +293,7 @@ for.body: ; preds = %entry, %for.body %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub store i32 %conv, i32* %arrayidx1, align 4 %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -330,7 +330,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -367,7 +367,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -405,7 +405,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -443,7 +443,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -481,7 +481,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -518,7 +518,7 @@ for.body: ; preds = %entry, %for.body %arrayidx2 = getelementptr inbounds [100 x i32], [100 x 
i32]* %A, i64 %sub1, i64 %sub store i32 %conv, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -555,7 +555,7 @@ for.body: ; preds = %entry, %for.body %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02 store i32 %conv, i32* %arrayidx3, align 4 %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx6, align 4 + %0 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 @@ -592,7 +592,7 @@ for.body: ; preds = %entry, %for.body %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02 store i32 %conv, i32* %arrayidx3, align 4 %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02 - %0 = load i32* %arrayidx6, align 4 + %0 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add nsw i64 %i.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll b/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll index a30d0124592a..5b2488c1bc8e 100644 --- a/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll @@ -41,7 +41,7 @@ for.body4: ; preds = %for.body4.preheader %mul5 = shl nsw i64 %j.02, 1 %add64 = or i64 %mul5, 1 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc9 = add nsw i64 %j.02, 1 @@ -87,7 +87,7 @@ for.body4: ; preds = %for.body4.preheader %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ] %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -133,7 +133,7 @@ for.body4: ; preds = %for.body4.preheader %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ] %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -179,7 +179,7 @@ for.body4: ; preds = %for.body4.preheader %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ] %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -225,7 
+225,7 @@ for.body4: ; preds = %for.body4.preheader %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ] %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -272,7 +272,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %sub = sub nsw i64 0, %j.02 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -319,7 +319,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %sub = sub nsw i64 0, %j.02 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -366,7 +366,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %sub = sub nsw i64 0, %j.02 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -413,7 +413,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %sub = sub nsw i64 0, %j.02 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc7 = add nsw i64 %j.02, 1 @@ -455,7 +455,7 @@ for.body3: ; preds = %for.cond1.preheader %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub store i32 %conv, i32* %arrayidx, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -504,7 +504,7 @@ for.body3: ; preds = %for.cond1.preheader %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub store i32 %conv, i32* %arrayidx, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -552,7 +552,7 @@ for.body3: ; preds = %for.cond1.preheader %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub store i32 %conv, i32* %arrayidx, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -600,7 +600,7 @@ for.body3: ; 
preds = %for.cond1.preheader %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub store i32 %conv, i32* %arrayidx, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll b/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll index c3fddbba0114..d84cd058718a 100644 --- a/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll @@ -30,7 +30,7 @@ for.body: ; preds = %entry, %for.body %mul = shl i64 %i.02, 1 %add13 = or i64 %mul, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add13 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -68,7 +68,7 @@ for.body: ; preds = %entry, %for.body %mul1 = shl i64 %i.02, 1 %add23 = or i64 %mul1, 1 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add23 - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -104,7 +104,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -140,7 +140,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -176,7 +176,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -212,7 +212,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -248,7 +248,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -284,7 +284,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %add = add i64 %i.02, 60 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load 
i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -320,7 +320,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -356,7 +356,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -392,7 +392,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -428,7 +428,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -464,7 +464,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -500,7 +500,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub1 = sub i64 -60, %i.02 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/GCD.ll b/llvm/test/Analysis/DependenceAnalysis/GCD.ll index f2b5860602e4..81d05a10cf1c 100644 --- a/llvm/test/Analysis/DependenceAnalysis/GCD.ll +++ b/llvm/test/Analysis/DependenceAnalysis/GCD.ll @@ -49,7 +49,7 @@ for.body3: ; preds = %for.cond1.preheader %mul6 = shl nsw i64 %j.02, 3 %add = add nsw i64 %mul5, %mul6 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -111,7 +111,7 @@ for.body3: ; preds = %for.cond1.preheader %add = add nsw i64 %mul5, %mul6 %add7 = or i64 %add, 1 %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7 - %0 = load i32* %arrayidx8, align 4 + %0 = load i32, i32* %arrayidx8, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 
%inc = add nsw i64 %j.02, 1 @@ -173,7 +173,7 @@ for.body3: ; preds = %for.cond1.preheader %mul6 = shl nsw i64 %j.02, 3 %add7 = add nsw i64 %mul5, %mul6 %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7 - %0 = load i32* %arrayidx8, align 4 + %0 = load i32, i32* %arrayidx8, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -233,7 +233,7 @@ for.body3: ; preds = %for.cond1.preheader %add5 = add nsw i64 %i.03, %mul4 %sub = add nsw i64 %add5, -1 %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx6, align 4 + %0 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -303,7 +303,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = sub nsw i64 %add12, %mul14 %add15 = add nsw i64 %sub, 4 %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15 - %0 = load i32* %arrayidx16, align 4 + %0 = load i32, i32* %arrayidx16, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -373,7 +373,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = sub nsw i64 %add12, %mul14 %add15 = add nsw i64 %sub, 5 %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15 - %0 = load i32* %arrayidx16, align 4 + %0 = load i32, i32* %arrayidx16, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -445,7 +445,7 @@ for.body3: ; preds = %for.body3.preheader %1 = mul nsw i64 %mul7, %n %arrayidx8.sum = add i64 %1, %add7 %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %arrayidx8.sum - %2 = load i32* %arrayidx9, align 4 + %2 = load i32, i32* %arrayidx9, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1 store i32 %2, i32* %B.addr.12, align 4 %inc = add nsw i64 %j.03, 1 @@ -536,7 +536,7 @@ for.body3: ; preds = %for.body3.preheader %10 = mul nsw i64 %idxprom10, %0 %arrayidx11.sum = add i64 %10, %idxprom8 %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum - %11 = load i32* %arrayidx12, align 4 + %11 = load i32, i32* %arrayidx12, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1 store i32 %11, i32* %B.addr.12, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -623,7 +623,7 @@ for.body3: ; preds = %for.body3.preheader %add10 = or i32 %add9, 1 %idxprom11 = sext i32 %add10 to i64 %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %idxprom11 - %5 = load i32* %arrayidx12, align 4 + %5 = load i32, i32* %arrayidx12, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1 store i32 %5, i32* %B.addr.12, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -715,7 +715,7 @@ for.body3: ; preds = %for.body3.preheader %10 = mul nsw i64 %idxprom10, %0 %arrayidx11.sum = add i64 %10, %idxprom8 %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum - %11 = load i32* %arrayidx12, align 4 + %11 = load i32, i32* %arrayidx12, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1 store i32 %11, i32* %B.addr.12, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/Invariant.ll b/llvm/test/Analysis/DependenceAnalysis/Invariant.ll index cd878bfae9f8..9fdb4d9b0c46 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Invariant.ll +++ 
b/llvm/test/Analysis/DependenceAnalysis/Invariant.ll @@ -20,9 +20,9 @@ for.body3: %j.02 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ] %res.11 = phi float [ %res.03, %for.cond1.preheader ], [ %add.res.1, %for.body3 ] %arrayidx4 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %j.02, i32 %j.02 - %0 = load float* %arrayidx4, align 4 + %0 = load float, float* %arrayidx4, align 4 %arrayidx6 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %i.04, i32 %j.02 - %1 = load float* %arrayidx6, align 4 + %1 = load float, float* %arrayidx6, align 4 %add = fadd float %0, %1 %cmp7 = fcmp ogt float %add, %g %add.res.1 = select i1 %cmp7, float %add, float %res.11 diff --git a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll index 77c1f3d550fb..1b47341ed4d6 100644 --- a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll +++ b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll @@ -28,7 +28,7 @@ for.body: %i = phi i64 [ 0, %entry ], [ %i.inc, %for.body ] %a.addr = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i %a.addr.2 = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5 - %0 = load i32* %a.addr, align 4 + %0 = load i32, i32* %a.addr, align 4 %1 = add i32 %0, 1 store i32 %1, i32* %a.addr.2, align 4 %i.inc = add nsw i64 %i, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/Preliminary.ll b/llvm/test/Analysis/DependenceAnalysis/Preliminary.ll index cfe21f391e3e..d6500cc03367 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Preliminary.ll +++ b/llvm/test/Analysis/DependenceAnalysis/Preliminary.ll @@ -18,7 +18,7 @@ entry: ; CHECK: da analyze - none! %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 ret i32 %0 } @@ -36,7 +36,7 @@ entry: ; CHECK: da analyze - none! 
%arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 ret i32 %0 } @@ -107,7 +107,7 @@ for.body12: ; preds = %for.body12.preheade %add13 = add nsw i64 %j.07, 2 %add14 = add nsw i64 %i.011, 3 %arrayidx17 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add - %0 = load i64* %arrayidx17, align 8 + %0 = load i64, i64* %arrayidx17, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.24, i64 1 store i64 %0, i64* %B.addr.24, align 8 %inc19 = add nsw i64 %k9.05, 1 @@ -290,7 +290,7 @@ for.body33: ; preds = %for.body33.preheade %sub48 = sub nsw i64 1, %k.037 %add49 = add nsw i64 %i.045, 3 %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44 - %0 = load i64* %arrayidx57, align 8 + %0 = load i64, i64* %arrayidx57, align 8 %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.112, i64 1 store i64 %0, i64* %B.addr.112, align 8 %inc = add nsw i64 %t.03, 1 @@ -445,7 +445,7 @@ for.body: ; preds = %for.body.preheader, store i32 %conv2, i32* %arrayidx, align 4 %idxprom4 = sext i8 %i.03 to i64 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i8 %i.03, 1 @@ -491,7 +491,7 @@ for.body: ; preds = %for.body.preheader, store i32 %conv2, i32* %arrayidx, align 4 %idxprom4 = sext i16 %i.03 to i64 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i16 %i.03, 1 @@ -535,7 +535,7 @@ for.body: ; preds = %for.body.preheader, %1 = trunc i64 %indvars.iv to i32 store i32 %1, i32* %arrayidx, align 4 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv - %2 = load i32* %arrayidx3, align 4 + %2 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %2, i32* %B.addr.02, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -570,7 +570,7 @@ entry: %conv = sext i8 %n to i64 %add = add i64 %conv, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -596,7 +596,7 @@ entry: %conv = sext i16 %n to i64 %add = add i64 %conv, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -622,7 +622,7 @@ entry: %add = add nsw i32 %n, 1 %idxprom1 = sext i32 %add to i64 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -648,7 +648,7 @@ entry: %add = add i32 %n, 1 %idxprom1 = zext i32 %add to i64 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -682,7 +682,7 @@ while.body.preheader: ; preds = %entry 
while.body: ; preds = %while.body.preheader, %while.body %i.02 = phi %struct.S* [ %incdec.ptr, %while.body ], [ %s, %while.body.preheader ] %0 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1, i32 0 - %1 = load i32* %0, align 4 + %1 = load i32, i32* %0, align 4 %2 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 0, i32 0 store i32 %1, i32* %2, align 4 %incdec.ptr = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/Propagating.ll b/llvm/test/Analysis/DependenceAnalysis/Propagating.ll index 5677eeda0808..5a97b9929263 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Propagating.ll +++ b/llvm/test/Analysis/DependenceAnalysis/Propagating.ll @@ -36,7 +36,7 @@ for.body3: ; preds = %for.cond1.preheader store i32 %conv, i32* %arrayidx5, align 4 %add6 = add nsw i64 %i.03, %j.02 %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6 - %0 = load i32* %arrayidx8, align 4 + %0 = load i32, i32* %arrayidx8, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -93,7 +93,7 @@ for.body6: ; preds = %for.cond4.preheader %add10 = add nsw i64 %j.03, %k.02 %sub11 = sub nsw i64 %j.03, %i.05 %arrayidx14 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10 - %0 = load i32* %arrayidx14, align 4 + %0 = load i32, i32* %arrayidx14, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.21, i64 1 store i32 %0, i32* %B.addr.21, align 4 %inc = add nsw i64 %k.02, 1 @@ -149,7 +149,7 @@ for.body3: ; preds = %for.cond1.preheader %add = add nsw i64 %i.03, %j.02 %add5 = add nsw i64 %add, 110 %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add5 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -200,7 +200,7 @@ for.body3: ; preds = %for.cond1.preheader %sub = sub nsw i64 %mul5, %i.03 %add6 = add nsw i64 %sub, 5 %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6 - %0 = load i32* %arrayidx8, align 4 + %0 = load i32, i32* %arrayidx8, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -252,7 +252,7 @@ for.body3: ; preds = %for.cond1.preheader %mul7 = shl nsw i64 %i.03, 1 %add8 = add nsw i64 %mul7, %j.02 %arrayidx10 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add8 - %0 = load i32* %arrayidx10, align 4 + %0 = load i32, i32* %arrayidx10, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -306,7 +306,7 @@ for.body3: ; preds = %for.cond1.preheader %mul8 = mul nsw i64 %i.03, 3 %add9 = add nsw i64 %mul8, %j.02 %arrayidx12 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9 - %0 = load i32* %arrayidx12, align 4 + %0 = load i32, i32* %arrayidx12, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -359,7 +359,7 @@ for.body3: ; preds = %for.cond1.preheader %add8 = add nsw i64 %mul7, %j.02 %mul9 = shl nsw i64 %i.03, 1 %arrayidx11 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %mul9, i64 %add8 - %0 = 
load i32* %arrayidx11, align 4 + %0 = load i32, i32* %arrayidx11, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -415,7 +415,7 @@ for.body3: ; preds = %for.cond1.preheader %mul10 = mul nsw i64 %i.03, -2 %add11 = add nsw i64 %mul10, 20 %arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add11, i64 %add9 - %0 = load i32* %arrayidx13, align 4 + %0 = load i32, i32* %arrayidx13, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -466,7 +466,7 @@ for.body3: ; preds = %for.cond1.preheader %mul6 = mul nsw i64 %i.03, -2 %add7 = add nsw i64 %mul6, 4 %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add7, i64 %add5 - %0 = load i32* %arrayidx9, align 4 + %0 = load i32, i32* %arrayidx9, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 @@ -517,7 +517,7 @@ for.body3: ; preds = %for.cond1.preheader %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4 store i32 %conv, i32* %arrayidx7, align 4 %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %j.02 - %0 = load i32* %arrayidx9, align 4 + %0 = load i32, i32* %arrayidx9, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1 store i32 %0, i32* %B.addr.11, align 4 %inc = add nsw i64 %j.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/Separability.ll b/llvm/test/Analysis/DependenceAnalysis/Separability.ll index 8df18b3b9b94..e56e741295ba 100644 --- a/llvm/test/Analysis/DependenceAnalysis/Separability.ll +++ b/llvm/test/Analysis/DependenceAnalysis/Separability.ll @@ -50,7 +50,7 @@ for.body9: ; preds = %for.cond7.preheader %sub = sub nsw i64 %mul, %l.02 %add12 = add nsw i64 %i.07, 10 %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub - %0 = load i32* %arrayidx15, align 4 + %0 = load i32, i32* %arrayidx15, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1 store i32 %0, i32* %B.addr.31, align 4 %inc = add nsw i64 %l.02, 1 @@ -124,7 +124,7 @@ for.body9: ; preds = %for.cond7.preheader %sub = sub nsw i64 %mul, %l.02 %add12 = add nsw i64 %i.07, 10 %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub - %0 = load i32* %arrayidx15, align 4 + %0 = load i32, i32* %arrayidx15, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1 store i32 %0, i32* %B.addr.31, align 4 %inc = add nsw i64 %l.02, 1 @@ -198,7 +198,7 @@ for.body9: ; preds = %for.cond7.preheader %add14 = add nsw i64 %j.05, %k.03 %add15 = add nsw i64 %i.07, 10 %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13 - %0 = load i32* %arrayidx19, align 4 + %0 = load i32, i32* %arrayidx19, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1 store i32 %0, i32* %B.addr.31, align 4 %inc = add nsw i64 %l.02, 1 @@ -273,7 +273,7 @@ for.body9: ; preds = %for.cond7.preheader %add15 = add nsw i64 %j.05, %k.03 %add16 = add nsw i64 %i.07, 10 %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14 - %0 = load i32* %arrayidx20, align 4 + %0 = load i32, i32* %arrayidx20, align 4 
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1 store i32 %0, i32* %B.addr.31, align 4 %inc = add nsw i64 %l.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/StrongSIV.ll b/llvm/test/Analysis/DependenceAnalysis/StrongSIV.ll index 9a5ab9b0a33c..78befa5e4707 100644 --- a/llvm/test/Analysis/DependenceAnalysis/StrongSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/StrongSIV.ll @@ -32,7 +32,7 @@ for.body: ; preds = %for.body.preheader, %1 = trunc i64 %indvars.iv to i32 store i32 %1, i32* %arrayidx, align 4 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv - %2 = load i32* %arrayidx3, align 4 + %2 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %2, i32* %B.addr.02, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -75,7 +75,7 @@ for.body: ; preds = %for.body.preheader, %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv2, i32* %arrayidx, align 4 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %i.03 - %1 = load i32* %arrayidx3, align 4 + %1 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %1, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -117,7 +117,7 @@ for.body: ; preds = %for.body.preheader, %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.03 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -159,7 +159,7 @@ for.body: ; preds = %for.body.preheader, %1 = trunc i64 %indvars.iv to i32 store i32 %1, i32* %arrayidx, align 4 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv - %2 = load i32* %arrayidx2, align 4 + %2 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %2, i32* %B.addr.02, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -198,7 +198,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -233,7 +233,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -270,7 +270,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul1 = shl i64 %i.02, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -307,7 +307,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul1 = shl i64 %i.02, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1 - %0 = load i32* 
%arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -342,7 +342,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -383,7 +383,7 @@ for.body: ; preds = %for.body.preheader, %mul = shl i64 %n, 1 %add1 = add i64 %i.03, %mul %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -424,7 +424,7 @@ for.body: ; preds = %entry, %for.body %mul1 = mul i64 %i.02, %n %add2 = add i64 %mul1, 5 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2 - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll b/llvm/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll index cde1e8de83e8..6e8b98ca2643 100644 --- a/llvm/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll @@ -53,7 +53,7 @@ for.body4: ; preds = %for.body4.preheader %mul56 = add i64 %j.03, %n1 %add7 = mul i64 %mul56, 3 %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7 - %0 = load i32* %arrayidx8, align 4 + %0 = load i32, i32* %arrayidx8, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc10 = add nsw i64 %j.03, 1 @@ -118,7 +118,7 @@ for.body5: ; preds = %for.body5.preheader %mul7 = shl i64 %n2, 1 %add8 = add i64 %mul6, %mul7 %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %add8 - %0 = load i32* %arrayidx9, align 4 + %0 = load i32, i32* %arrayidx9, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc11 = add nsw i64 %j.03, 1 @@ -181,7 +181,7 @@ for.body4: ; preds = %for.body4.preheader %mul6 = shl i64 %n1, 1 %add = sub i64 %mul6, %j.03 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc9 = add nsw i64 %j.03, 1 @@ -242,7 +242,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %sub5 = sub i64 %j.03, %n1 %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub5 - %0 = load i32* %arrayidx6, align 4 + %0 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc8 = add nsw i64 %j.03, 1 @@ -304,7 +304,7 @@ for.body4: ; preds = %for.body4.preheader %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ] %add6 = sub i64 %n1, %j.03 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 
4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc9 = add nsw i64 %j.03, 1 @@ -366,7 +366,7 @@ for.body4: ; preds = %for.body4.preheader %mul = shl i64 %n2, 1 %add6 = sub i64 %mul, %j.03 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc9 = add nsw i64 %j.03, 1 @@ -421,7 +421,7 @@ for.body3: ; preds = %for.body3.preheader store i32 %conv, i32* %arrayidx, align 4 %mul = shl i64 %n2, 1 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1 store i32 %0, i32* %B.addr.12, align 4 %inc = add nsw i64 %j.03, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll index aa5148335d60..711d0fabf87f 100644 --- a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll @@ -35,7 +35,7 @@ for.body: ; preds = %for.body.preheader, %mul14 = add i64 %i.03, %n %add3 = mul i64 %mul14, 3 %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %add3 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -82,7 +82,7 @@ for.body: ; preds = %for.body.preheader, %mul3 = shl i64 %n, 1 %add4 = add i64 %mul2, %mul3 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %add4 - %0 = load i32* %arrayidx5, align 4 + %0 = load i32, i32* %arrayidx5, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -127,7 +127,7 @@ for.body: ; preds = %for.body.preheader, %mul2 = shl i64 %n, 1 %add = sub i64 %mul2, %i.03 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -173,7 +173,7 @@ for.body: ; preds = %for.body.preheader, %mul2 = shl i64 %n, 1 %sub = sub i64 %i.03, %mul2 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -218,7 +218,7 @@ for.body: ; preds = %for.body.preheader, store i32 %conv, i32* %arrayidx, align 4 %add2 = sub i64 %n, %i.03 %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2 - %0 = load i32* %arrayidx3, align 4 + %0 = load i32, i32* %arrayidx3, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -264,7 +264,7 @@ for.body: ; preds = %for.body.preheader, %sub2 = sub nsw i64 0, %i.03 %sub3 = sub i64 %sub2, %n %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %sub3 - %0 = load i32* %arrayidx4, align 4 + %0 = load i32, i32* %arrayidx4, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -310,7 +310,7 @@ for.body: ; preds = 
%for.body.preheader, store i32 %conv, i32* %arrayidx, align 4 %sub = sub i64 0, %i.03 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -359,7 +359,7 @@ for.body: ; preds = %for.body.preheader, %add5 = add i64 %mul3, %mul4 %add6 = add i64 %add5, 1 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6 - %0 = load i32* %arrayidx7, align 4 + %0 = load i32, i32* %arrayidx7, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 @@ -408,7 +408,7 @@ for.body: ; preds = %for.body.preheader, %sub = add i64 %mul3, %0 %add5 = add i64 %sub, 2 %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %add5 - %1 = load i32* %arrayidx6, align 4 + %1 = load i32, i32* %arrayidx6, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %1, i32* %B.addr.02, align 4 %inc = add nsw i64 %i.03, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll b/llvm/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll index be2d0357a265..5b81ec10f9d8 100644 --- a/llvm/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll @@ -35,7 +35,7 @@ for.body: ; preds = %for.body.preheader, %mul1 = mul i64 %i.03, %n %sub = sub i64 1, %mul1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -80,7 +80,7 @@ for.body: ; preds = %for.body.preheader, %add1 = add i64 %n, 1 %sub = sub i64 %add1, %i.03 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -118,7 +118,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub = sub i64 6, %i.02 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -153,7 +153,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub = sub i64 6, %i.02 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -188,7 +188,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub = sub i64 -6, %i.02 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -229,7 +229,7 @@ for.body: ; preds = %for.body.preheader, %0 = mul i64 %i.03, -3 %sub = add i64 %0, 5 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub - %1 = load i32* %arrayidx2, align 4 + %1 = load i32, i32* 
%arrayidx2, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %1, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -268,7 +268,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %sub = sub i64 5, %i.02 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll b/llvm/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll index fa77fc09b3f0..8adb7f7b32d5 100644 --- a/llvm/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll @@ -29,7 +29,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -69,7 +69,7 @@ for.body: ; preds = %for.body.preheader, %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -107,7 +107,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -142,7 +142,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -177,7 +177,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -212,7 +212,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 -10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -251,7 +251,7 @@ for.body: ; preds = %for.body.preheader, %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul store i32 %conv, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, 
i32* %A, i64 10 - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll b/llvm/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll index 40e714fc7687..ac261b0dcaf5 100644 --- a/llvm/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll @@ -29,7 +29,7 @@ for.body: ; preds = %entry, %for.body %mul = shl i64 %i.02, 1 %add = add i64 %mul, 10 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -69,7 +69,7 @@ for.body: ; preds = %for.body.preheader, %mul = mul i64 %i.03, %n %add = add i64 %mul, 10 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 @@ -107,7 +107,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul = shl i64 %i.02, 1 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -142,7 +142,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul = shl i64 %i.02, 1 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -177,7 +177,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul = shl i64 %i.02, 1 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -212,7 +212,7 @@ for.body: ; preds = %entry, %for.body store i32 %conv, i32* %arrayidx, align 4 %mul = shl i64 %i.02, 1 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1 store i32 %0, i32* %B.addr.01, align 4 %inc = add i64 %i.02, 1 @@ -251,7 +251,7 @@ for.body: ; preds = %for.body.preheader, store i32 %conv, i32* %arrayidx, align 4 %mul = mul i64 %i.03, 3 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1 store i32 %0, i32* %B.addr.02, align 4 %inc = add i64 %i.03, 1 diff --git a/llvm/test/Analysis/DependenceAnalysis/ZIV.ll b/llvm/test/Analysis/DependenceAnalysis/ZIV.ll index 700c51e1fdbf..b3216415c8e8 100644 --- a/llvm/test/Analysis/DependenceAnalysis/ZIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/ZIV.ll @@ -23,7 +23,7 @@ entry: %add1 = add i64 %n, 1 %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 
%add1 - %0 = load i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -46,7 +46,7 @@ entry: %add = add i64 %n, 1 %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 store i32 %0, i32* %B, align 4 ret void } @@ -68,7 +68,7 @@ entry: ; CHECK: da analyze - none! %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %m - %0 = load i32* %arrayidx1, align 4 + %0 = load i32, i32* %arrayidx1, align 4 store i32 %0, i32* %B, align 4 ret void } diff --git a/llvm/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll b/llvm/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll index d51c159a9117..513ec86ef031 100644 --- a/llvm/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll +++ b/llvm/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll @@ -3,7 +3,7 @@ @g = internal global i32 0 ; [#uses=2] define i32 @r() { - %tmp = load i32* @g ; [#uses=1] + %tmp = load i32, i32* @g ; [#uses=1] ret i32 %tmp } diff --git a/llvm/test/Analysis/GlobalsModRef/aliastest.ll b/llvm/test/Analysis/GlobalsModRef/aliastest.ll index 4cfed71bfb76..3474e130de8c 100644 --- a/llvm/test/Analysis/GlobalsModRef/aliastest.ll +++ b/llvm/test/Analysis/GlobalsModRef/aliastest.ll @@ -9,6 +9,6 @@ define i32 @test(i32* %P) { ; CHECK-NEXT: ret i32 7 store i32 7, i32* %P store i32 12, i32* @X - %V = load i32* %P ; [#uses=1] + %V = load i32, i32* %P ; [#uses=1] ret i32 %V } diff --git a/llvm/test/Analysis/GlobalsModRef/chaining-analysis.ll b/llvm/test/Analysis/GlobalsModRef/chaining-analysis.ll index aeb76e42d295..26671daf3480 100644 --- a/llvm/test/Analysis/GlobalsModRef/chaining-analysis.ll +++ b/llvm/test/Analysis/GlobalsModRef/chaining-analysis.ll @@ -14,7 +14,7 @@ define i32 @test(i32* %P) { ; CHECK-NEXT: ret i32 12 store i32 12, i32* @X call double @doesnotmodX( double 1.000000e+00 ) ; :1 [#uses=0] - %V = load i32* @X ; [#uses=1] + %V = load i32, i32* @X ; [#uses=1] ret i32 %V } diff --git a/llvm/test/Analysis/GlobalsModRef/indirect-global.ll b/llvm/test/Analysis/GlobalsModRef/indirect-global.ll index 48ac6dd1d167..028132324674 100644 --- a/llvm/test/Analysis/GlobalsModRef/indirect-global.ll +++ b/llvm/test/Analysis/GlobalsModRef/indirect-global.ll @@ -12,11 +12,11 @@ define void @test() { define i32 @test1(i32* %P) { ; CHECK: ret i32 0 - %g1 = load i32** @G ; [#uses=2] - %h1 = load i32* %g1 ; [#uses=1] + %g1 = load i32*, i32** @G ; [#uses=2] + %h1 = load i32, i32* %g1 ; [#uses=1] store i32 123, i32* %P - %g2 = load i32** @G ; [#uses=0] - %h2 = load i32* %g1 ; [#uses=1] + %g2 = load i32*, i32** @G ; [#uses=0] + %h2 = load i32, i32* %g1 ; [#uses=1] %X = sub i32 %h1, %h2 ; [#uses=1] ret i32 %X } diff --git a/llvm/test/Analysis/GlobalsModRef/modreftest.ll b/llvm/test/Analysis/GlobalsModRef/modreftest.ll index 3eed916e83b4..74101e23bed9 100644 --- a/llvm/test/Analysis/GlobalsModRef/modreftest.ll +++ b/llvm/test/Analysis/GlobalsModRef/modreftest.ll @@ -9,7 +9,7 @@ define i32 @test(i32* %P) { ; CHECK-NEXT: ret i32 12 store i32 12, i32* @X call void @doesnotmodX( ) - %V = load i32* @X ; [#uses=1] + %V = load i32, i32* @X ; [#uses=1] ret i32 %V } diff --git a/llvm/test/Analysis/GlobalsModRef/pr12351.ll b/llvm/test/Analysis/GlobalsModRef/pr12351.ll index c221f4c087f8..8f922770b306 100644 --- a/llvm/test/Analysis/GlobalsModRef/pr12351.ll +++ b/llvm/test/Analysis/GlobalsModRef/pr12351.ll @@ -9,7 +9,7 @@ define void @foo(i8* %x, i8* %y) { define void @bar(i8* %y, i8* %z) { %x = alloca i8 call void 
@foo(i8* %x, i8* %y) - %t = load i8* %x + %t = load i8, i8* %x store i8 %t, i8* %y ; CHECK: store i8 %t, i8* %y ret void @@ -19,8 +19,8 @@ define void @bar(i8* %y, i8* %z) { define i32 @foo2() { %foo = alloca i32 call void @bar2(i32* %foo) - %t0 = load i32* %foo, align 4 -; CHECK: %t0 = load i32* %foo, align 4 + %t0 = load i32, i32* %foo, align 4 +; CHECK: %t0 = load i32, i32* %foo, align 4 ret i32 %t0 } diff --git a/llvm/test/Analysis/GlobalsModRef/volatile-instrs.ll b/llvm/test/Analysis/GlobalsModRef/volatile-instrs.ll index 46d3d768bd07..df49b4b90f97 100644 --- a/llvm/test/Analysis/GlobalsModRef/volatile-instrs.ll +++ b/llvm/test/Analysis/GlobalsModRef/volatile-instrs.ll @@ -22,7 +22,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, define i32 @main() nounwind uwtable ssp { main_entry: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false) - %0 = load volatile i32* getelementptr inbounds (%struct.anon* @b, i64 0, i32 0), align 4 + %0 = load volatile i32, i32* getelementptr inbounds (%struct.anon* @b, i64 0, i32 0), align 4 store i32 %0, i32* @c, align 4 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false) nounwind %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %0) nounwind diff --git a/llvm/test/Analysis/LazyCallGraph/basic.ll b/llvm/test/Analysis/LazyCallGraph/basic.ll index b8108d99ed6f..d0116021d9cf 100644 --- a/llvm/test/Analysis/LazyCallGraph/basic.ll +++ b/llvm/test/Analysis/LazyCallGraph/basic.ll @@ -118,10 +118,10 @@ define void @test2() { ; CHECK-NEXT: -> f1 ; CHECK-NOT: -> - load i8** bitcast (void ()** @g to i8**) - load i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**) - load i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**) - load i8** bitcast (void ()** @h to i8**) + load i8*, i8** bitcast (void ()** @g to i8**) + load i8*, i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**) + load i8*, i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**) + load i8*, i8** bitcast (void ()** @h to i8**) ret void } diff --git a/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll b/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll index 5ca4b315c9be..238f3f445015 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll @@ -20,18 +20,18 @@ target triple = "x86_64-apple-macosx10.10.0" define void @f() { entry: - %a = load i32** @A, align 8 - %b = load i32** @B, align 8 + %a = load i32*, i32** @A, align 8 + %b = load i32*, i32** @B, align 8 br label %for.body for.body: ; preds = %for.body, %entry %storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ] %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %storemerge3 - %loadA = load i32* %arrayidxA, align 2 + %loadA = load i32, i32* %arrayidxA, align 2 %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %storemerge3 - %loadB = load i32* %arrayidxB, align 2 + %loadB = load i32, i32* %arrayidxB, align 2 %mul = mul i32 %loadB, %loadA diff --git a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll index 
f0203c5c7d3a..6770f9253a05 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll @@ -28,22 +28,22 @@ target triple = "x86_64-apple-macosx10.10.0" define void @f() { entry: - %a = load i16** @A, align 8 - %b = load i16** @B, align 8 - %c = load i16** @C, align 8 + %a = load i16*, i16** @A, align 8 + %b = load i16*, i16** @B, align 8 + %c = load i16*, i16** @C, align 8 br label %for.body for.body: ; preds = %for.body, %entry %storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ] %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3 - %loadA = load i16* %arrayidxA, align 2 + %loadA = load i16, i16* %arrayidxA, align 2 %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3 - %loadB = load i16* %arrayidxB, align 2 + %loadB = load i16, i16* %arrayidxB, align 2 %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3 - %loadC = load i16* %arrayidxC, align 2 + %loadC = load i16, i16* %arrayidxC, align 2 %mul = mul i16 %loadB, %loadA %mul1 = mul i16 %mul, %loadC diff --git a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll index f452b324f6e7..a7a324bd9a0e 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll @@ -11,7 +11,7 @@ target triple = "x86_64-apple-macosx10.10.0" ; CHECK: Report: unsafe dependent memory operations in loop -; DEBUG: LAA: Distance for %loadA = load i16* %arrayidxA, align 2 to store i16 %mul1, i16* %arrayidxA_plus_2, align 2: 2 +; DEBUG: LAA: Distance for %loadA = load i16, i16* %arrayidxA, align 2 to store i16 %mul1, i16* %arrayidxA_plus_2, align 2: 2 ; DEBUG-NEXT: LAA: Failure because of Positive distance 2 ; CHECK: Run-time memory checks: @@ -29,22 +29,22 @@ target triple = "x86_64-apple-macosx10.10.0" define void @f() { entry: - %a = load i16** @A, align 8 - %b = load i16** @B, align 8 - %c = load i16** @C, align 8 + %a = load i16*, i16** @A, align 8 + %b = load i16*, i16** @B, align 8 + %c = load i16*, i16** @C, align 8 br label %for.body for.body: ; preds = %for.body, %entry %storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ] %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3 - %loadA = load i16* %arrayidxA, align 2 + %loadA = load i16, i16* %arrayidxA, align 2 %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3 - %loadB = load i16* %arrayidxB, align 2 + %loadB = load i16, i16* %arrayidxB, align 2 %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3 - %loadC = load i16* %arrayidxC, align 2 + %loadC = load i16, i16* %arrayidxC, align 2 %mul = mul i16 %loadB, %loadA %mul1 = mul i16 %mul, %loadC diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll index b0725ecf50a4..d472f7c323fb 100644 --- a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll +++ b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll @@ -11,7 +11,7 @@ for.body: ; preds = %for.body, %entry %i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ] %arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01 %arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01 - %tmp6 = load i32* %arrayidx5, align 4 + %tmp6 = load i32, i32* %arrayidx5, align 4 store i32 %tmp6, i32* %arrayidx, align 4 %tmp8.7 = add 
i32 %i.01, 8 %cmp.7 = icmp ult i32 %tmp8.7, 1024 diff --git a/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll b/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll index 6896e7a47283..7e42530798f6 100644 --- a/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll +++ b/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll @@ -16,11 +16,11 @@ bb.nph: ; preds = %entry bb: ; preds = %bb1, %bb.nph %j.01 = phi i32 [ %8, %bb1 ], [ 0, %bb.nph ] ; [#uses=1] - load i32* %srcptr, align 4 ; :1 [#uses=2] + load i32, i32* %srcptr, align 4 ; :1 [#uses=2] and i32 %1, 255 ; :2 [#uses=1] and i32 %1, -256 ; :3 [#uses=1] getelementptr [256 x i8], [256 x i8]* @lut, i32 0, i32 %2 ; :4 [#uses=1] - load i8* %4, align 1 ; :5 [#uses=1] + load i8, i8* %4, align 1 ; :5 [#uses=1] zext i8 %5 to i32 ; :6 [#uses=1] or i32 %6, %3 ; :7 [#uses=1] store i32 %7, i32* %dstptr, align 4 diff --git a/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll b/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll index 1d4a27ccc860..0c24ee4eaff1 100644 --- a/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll +++ b/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll @@ -10,7 +10,7 @@ bb1: ; preds = %bb1, %bb1.thread %indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; [#uses=4] %i.0.reg2mem.0 = sub i32 255, %indvar ; [#uses=2] %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; [#uses=1] - %1 = load i32* %0, align 4 ; [#uses=1] + %1 = load i32, i32* %0, align 4 ; [#uses=1] %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; [#uses=1] store i32 %1, i32* %2, align 4 %3 = sub i32 254, %indvar ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll b/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll index f19d18c72e69..ebcecbf74294 100644 --- a/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll +++ b/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll @@ -9,12 +9,12 @@ define void @func_15() nounwind { entry: - %0 = load i16* @g_16, align 2 ; [#uses=1] + %0 = load i16, i16* @g_16, align 2 ; [#uses=1] %1 = icmp sgt i16 %0, 0 ; [#uses=1] br i1 %1, label %bb2, label %bb.nph bb.nph: ; preds = %entry - %g_16.promoted = load i16* @g_16 ; [#uses=1] + %g_16.promoted = load i16, i16* @g_16 ; [#uses=1] br label %bb bb: ; preds = %bb1, %bb.nph diff --git a/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll b/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll index a4358aa63215..d18bdaf7cba7 100644 --- a/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll +++ b/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll @@ -3,11 +3,11 @@ define void @test() { entry: - %0 = load i16* undef, align 1 + %0 = load i16, i16* undef, align 1 %1 = lshr i16 %0, 8 %2 = and i16 %1, 3 %3 = zext i16 %2 to i32 - %4 = load i8* undef, align 1 + %4 = load i8, i8* undef, align 1 %5 = lshr i8 %4, 4 %6 = and i8 %5, 1 %7 = zext i8 %6 to i32 diff --git a/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll b/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll index 8c6c9b6d1eb5..3ca552a7dfd7 100644 --- a/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll +++ b/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll @@ -19,20 +19,20 @@ lbl_818: ; preds = %for.end, %entry br label %for.cond for.cond: ; preds = %for.body, %lbl_818 - %0 = load i32* @g_814, align 
4 + %0 = load i32, i32* @g_814, align 4 %cmp = icmp sle i32 %0, 0 br i1 %cmp, label %for.body, label %for.end for.body: ; preds = %for.cond %idxprom = sext i32 %0 to i64 %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* getelementptr inbounds ([1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom - %1 = load i32* %arrayidx, align 1 + %1 = load i32, i32* %arrayidx, align 1 store i32 %1, i32* @func_21_l_773, align 4 store i32 1, i32* @g_814, align 4 br label %for.cond for.end: ; preds = %for.cond - %2 = load i32* @func_21_l_773, align 4 + %2 = load i32, i32* @func_21_l_773, align 4 %tobool = icmp ne i32 %2, 0 br i1 %tobool, label %lbl_818, label %if.end diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll index 7eeb30849589..0976ef92985c 100644 --- a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll +++ b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll @@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu" define i32 @test() { entry: - %0 = load i32** undef, align 8 ; [#uses=1] + %0 = load i32*, i32** undef, align 8 ; [#uses=1] %1 = ptrtoint i32* %0 to i64 ; [#uses=1] %2 = sub i64 undef, %1 ; [#uses=1] %3 = lshr i64 %2, 3 ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll b/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll index e921544f9b43..a282ee6993f0 100644 --- a/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll +++ b/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll @@ -11,7 +11,7 @@ entry: br i1 %0, label %bb, label %return bb: - load i32* %q, align 4 + load i32, i32* %q, align 4 icmp eq i32 %1, 0 br i1 %2, label %return, label %bb3.preheader @@ -21,7 +21,7 @@ bb3.preheader: bb3: %i.0 = phi i32 [ %7, %bb3 ], [ 0, %bb3.preheader ] getelementptr i32, i32* %p, i32 %i.0 - load i32* %3, align 4 + load i32, i32* %3, align 4 add i32 %4, 1 getelementptr i32, i32* %p, i32 %i.0 store i32 %5, i32* %6, align 4 diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll b/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll index 685a106c2963..e6c62ee6b475 100644 --- a/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll +++ b/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll @@ -36,7 +36,7 @@ bb6: ; preds = %bb7, %bb.nph7 %8 = shl i32 %x.06, 1 ; [#uses=1] %9 = add i32 %6, %8 ; [#uses=1] %10 = getelementptr i8, i8* %r, i32 %9 ; [#uses=1] - %11 = load i8* %10, align 1 ; [#uses=1] + %11 = load i8, i8* %10, align 1 ; [#uses=1] %12 = getelementptr i8, i8* %j, i32 %7 ; [#uses=1] store i8 %11, i8* %12, align 1 %13 = add i32 %x.06, 1 ; [#uses=2] @@ -103,7 +103,7 @@ bb14: ; preds = %bb15, %bb.nph3 %29 = shl i32 %x.12, 2 ; [#uses=1] %30 = add i32 %29, %25 ; [#uses=1] %31 = getelementptr i8, i8* %r, i32 %30 ; [#uses=1] - %32 = load i8* %31, align 1 ; [#uses=1] + %32 = load i8, i8* %31, align 1 ; [#uses=1] %.sum = add i32 %26, %x.12 ; [#uses=1] %33 = getelementptr i8, i8* %j, i32 %.sum ; [#uses=1] store i8 %32, i8* %33, align 1 @@ -111,7 +111,7 @@ bb14: ; preds = %bb15, %bb.nph3 %35 = or i32 %34, 2 ; [#uses=1] %36 = add i32 %35, %25 ; [#uses=1] %37 = getelementptr i8, i8* %r, i32 %36 ; [#uses=1] - %38 = load i8* %37, align 1 ; [#uses=1] + %38 = load i8, i8* %37, align 1 ; [#uses=1] %.sum6 = add i32 %27, %x.12 ; [#uses=1] %39 = getelementptr i8, i8* %j, i32 %.sum6 ; [#uses=1] store i8 %38, i8* %39, align 1 diff --git a/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll b/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll 
index c9689f7fe145..078ca03ff14e 100644 --- a/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll +++ b/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll @@ -53,7 +53,7 @@ define void @infer.sext.1(i32 %start, i1* %c) { ; CHECK: %idx.sext = sext i32 %idx to i64 ; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64)),+,2}<%loop> %idx.inc = add nsw i32 %idx, 2 - %condition = load i1* %c + %condition = load i1, i1* %c br i1 %condition, label %exit, label %loop exit: @@ -73,7 +73,7 @@ define void @infer.sext.2(i1* %c, i8 %start) { ; CHECK: %idx.sext = sext i8 %idx to i16 ; CHECK-NEXT: --> {(1 + (sext i8 %start to i16)),+,1}<%loop> %idx.inc = add nsw i8 %idx, 1 - %condition = load volatile i1* %c + %condition = load volatile i1, i1* %c br i1 %condition, label %exit, label %loop exit: @@ -93,7 +93,7 @@ define void @infer.zext.1(i1* %c, i8 %start) { ; CHECK: %idx.zext = zext i8 %idx to i16 ; CHECK-NEXT: --> {(1 + (zext i8 %start to i16)),+,1}<%loop> %idx.inc = add nuw i8 %idx, 1 - %condition = load volatile i1* %c + %condition = load volatile i1, i1* %c br i1 %condition, label %exit, label %loop exit: diff --git a/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll b/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll index 32c1074b1742..f26c8d56754d 100644 --- a/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll +++ b/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll @@ -3,7 +3,7 @@ define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) { ; CHECK-LABEL: slt_trip_count_with_range entry: - %limit = load i32* %ptr0, !range !0 + %limit = load i32, i32* %ptr0, !range !0 br label %loop loop: @@ -20,7 +20,7 @@ define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) { define i32 @ult_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) { ; CHECK-LABEL: ult_trip_count_with_range entry: - %limit = load i32* %ptr0, !range !0 + %limit = load i32, i32* %ptr0, !range !0 br label %loop loop: diff --git a/llvm/test/Analysis/ScalarEvolution/load.ll b/llvm/test/Analysis/ScalarEvolution/load.ll index 8b460a806cbe..f73e7f1c2016 100644 --- a/llvm/test/Analysis/ScalarEvolution/load.ll +++ b/llvm/test/Analysis/ScalarEvolution/load.ll @@ -17,10 +17,10 @@ for.body: ; preds = %entry, %for.body ; CHECK: --> %sum.04{{ *}}Exits: 2450 %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds [50 x i32], [50 x i32]* @arr1, i32 0, i32 %i.03 - %0 = load i32* %arrayidx, align 4 + %0 = load i32, i32* %arrayidx, align 4 ; CHECK: --> %0{{ *}}Exits: 50 %arrayidx1 = getelementptr inbounds [50 x i32], [50 x i32]* @arr2, i32 0, i32 %i.03 - %1 = load i32* %arrayidx1, align 4 + %1 = load i32, i32* %arrayidx1, align 4 ; CHECK: --> %1{{ *}}Exits: 0 %add = add i32 %0, %sum.04 %add2 = add i32 %add, %1 @@ -52,10 +52,10 @@ for.body: ; preds = %entry, %for.body %n.01 = phi %struct.ListNode* [ bitcast ({ %struct.ListNode*, i32, [4 x i8] }* @node5 to %struct.ListNode*), %entry ], [ %1, %for.body ] ; CHECK: --> %n.01{{ *}}Exits: @node1 %i = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 1 - %0 = load i32* %i, align 4 + %0 = load i32, i32* %i, align 4 %add = add nsw i32 %0, %sum.02 %next = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 0 - %1 = load %struct.ListNode** %next, align 8 + %1 = load %struct.ListNode*, %struct.ListNode** %next, align 8 ; CHECK: --> %1{{ *}}Exits: 0 %cmp = icmp eq %struct.ListNode* %1, null br i1 %cmp, label %for.end, label %for.body diff --git 
a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll index 4faedde87579..abebea7d74c3 100644 --- a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll +++ b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll @@ -174,7 +174,7 @@ for.body.i: ; preds = %for.cond.i, %entry for.cond.i: ; preds = %for.body.i store i32 %add.i.i, i32* @a, align 4 - %ld = load volatile i32* @b + %ld = load volatile i32, i32* @b %cmp.i = icmp ne i32 %ld, 0 br i1 %cmp.i, label %for.body.i, label %bar.exit diff --git a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll index b9ede6f7e446..892fc23fe6b2 100644 --- a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll +++ b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll @@ -35,7 +35,7 @@ bb2: ; preds = %bb1 ; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6 ; CHECK-NEXT: --> (-1 + (-1 * ((-1 + (-1 * (sext i32 {3,+,1}<%bb1> to i64))) smax (-1 + (-1 * (sext i32 %N to i64)))))) %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9 - %tmp12 = load i32* %tmp11, align 4 + %tmp12 = load i32, i32* %tmp11, align 4 %tmp13 = shl nsw i32 %tmp12, 1 %tmp14 = icmp sge i32 3, %i.0 %tmp17 = add nsw i64 %i.0.1, -3 diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll index 246f9ad1abc8..bef1070bbcb0 100644 --- a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll +++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll @@ -28,10 +28,10 @@ bb: ; preds = %bb.nph, %bb1 ; CHECK: --> {%d,+,16}<%bb> %2 = getelementptr inbounds double, double* %d, i64 %1 ; [#uses=1] - %3 = load double* %2, align 8 ; [#uses=1] + %3 = load double, double* %2, align 8 ; [#uses=1] %4 = sext i32 %i.01 to i64 ; [#uses=1] %5 = getelementptr inbounds double, double* %q, i64 %4 ; [#uses=1] - %6 = load double* %5, align 8 ; [#uses=1] + %6 = load double, double* %5, align 8 ; [#uses=1] %7 = or i32 %i.01, 1 ; [#uses=1] ; CHECK: %8 = sext i32 %7 to i64 @@ -54,7 +54,7 @@ bb: ; preds = %bb.nph, %bb1 ; CHECK: {(8 + %q),+,16}<%bb> %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; [#uses=1] - %10 = load double* %9, align 8 ; [#uses=1] + %10 = load double, double* %9, align 8 ; [#uses=1] %11 = fadd double %6, %10 ; [#uses=1] %12 = fadd double %11, 3.200000e+00 ; [#uses=1] %13 = fmul double %3, %12 ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll index 7b8de519429e..127bb19bff02 100644 --- a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll +++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll @@ -26,10 +26,10 @@ bb: ; preds = %bb.nph, %bb1 ; CHECK: --> {%d,+,16}<%bb> %2 = getelementptr inbounds double, double* %d, i64 %1 ; [#uses=1] - %3 = load double* %2, align 8 ; [#uses=1] + %3 = load double, double* %2, align 8 ; [#uses=1] %4 = sext i32 %i.01 to i64 ; [#uses=1] %5 = getelementptr inbounds double, double* %q, i64 %4 ; [#uses=1] - %6 = load double* %5, align 8 ; [#uses=1] + %6 = load double, double* %5, align 8 ; [#uses=1] %7 = or i32 %i.01, 1 ; [#uses=1] ; CHECK: %8 = sext i32 %7 to i64 @@ -52,7 +52,7 @@ bb: ; preds = %bb.nph, %bb1 ; CHECK: {(8 + %q),+,16}<%bb> %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; [#uses=1] - %10 = load double* %9, align 8 ; [#uses=1] + %10 = load double, double* %9, align 8 ; [#uses=1] %11 = fadd double %6, %10 ; [#uses=1] %12 = fadd double %11, 3.200000e+00 ; [#uses=1] %13 = fmul double %3, %12 ; [#uses=1] 
diff --git a/llvm/test/Analysis/ScalarEvolution/nsw.ll b/llvm/test/Analysis/ScalarEvolution/nsw.ll index 024b2804c062..0a3c535b0499 100644 --- a/llvm/test/Analysis/ScalarEvolution/nsw.ll +++ b/llvm/test/Analysis/ScalarEvolution/nsw.ll @@ -7,7 +7,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" ; CHECK: Classifying expressions for: @test1 define void @test1(double* %p) nounwind { entry: - %tmp = load double* %p, align 8 ; [#uses=1] + %tmp = load double, double* %p, align 8 ; [#uses=1] %tmp1 = fcmp ogt double %tmp, 2.000000e+00 ; [#uses=1] br i1 %tmp1, label %bb.nph, label %return @@ -20,7 +20,7 @@ bb: ; preds = %bb1, %bb.nph ; CHECK-NEXT: --> {0,+,1}<%bb> %tmp2 = sext i32 %i.01 to i64 ; [#uses=1] %tmp3 = getelementptr double, double* %p, i64 %tmp2 ; [#uses=1] - %tmp4 = load double* %tmp3, align 8 ; [#uses=1] + %tmp4 = load double, double* %tmp3, align 8 ; [#uses=1] %tmp5 = fmul double %tmp4, 9.200000e+00 ; [#uses=1] %tmp6 = sext i32 %i.01 to i64 ; [#uses=1] %tmp7 = getelementptr double, double* %p, i64 %tmp6 ; [#uses=1] @@ -39,7 +39,7 @@ bb1: ; preds = %bb %tmp9 = getelementptr double, double* %p, i64 %phitmp ; [#uses=1] ; CHECK: %tmp9 ; CHECK-NEXT: --> {(8 + %p),+,8}<%bb> - %tmp10 = load double* %tmp9, align 8 ; [#uses=1] + %tmp10 = load double, double* %tmp9, align 8 ; [#uses=1] %tmp11 = fcmp ogt double %tmp10, 2.000000e+00 ; [#uses=1] br i1 %tmp11, label %bb, label %bb1.return_crit_edge diff --git a/llvm/test/Analysis/ScalarEvolution/pr22179.ll b/llvm/test/Analysis/ScalarEvolution/pr22179.ll index d9fb5104436e..5dc41920e862 100644 --- a/llvm/test/Analysis/ScalarEvolution/pr22179.ll +++ b/llvm/test/Analysis/ScalarEvolution/pr22179.ll @@ -14,7 +14,7 @@ define i32 @main() { loop: %storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ] - %m = load volatile i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4 + %m = load volatile i32, i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4 %inc = add nuw i8 %storemerge1, 1 ; CHECK: %inc = add nuw i8 %storemerge1, 1 ; CHECK-NEXT: --> {1,+,1}<%loop> diff --git a/llvm/test/Analysis/ScalarEvolution/pr22674.ll b/llvm/test/Analysis/ScalarEvolution/pr22674.ll index 6b7a143f11ef..1bc7fd327d28 100644 --- a/llvm/test/Analysis/ScalarEvolution/pr22674.ll +++ b/llvm/test/Analysis/ScalarEvolution/pr22674.ll @@ -45,9 +45,9 @@ cond.false: ; preds = %for.end, %for.inc, _ZNK4llvm12AttributeSet3endEj.exit: ; preds = %for.end %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1 - %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2 + %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"*, %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2 %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1 - %1 = load i32* %NumAttrs.i.i.i, align 4, !tbaa !8 + %1 = load i32, i32* %NumAttrs.i.i.i, align 4, !tbaa !8 %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", 
%"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1 br i1 undef, label %return, label %for.body11 @@ -58,7 +58,7 @@ for.cond9: ; preds = %_ZNK4llvm9Attribute for.body11: ; preds = %for.cond9, %_ZNK4llvm12AttributeSet3endEj.exit %I5.096 = phi %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* [ %incdec.ptr, %for.cond9 ], [ undef, %_ZNK4llvm12AttributeSet3endEj.exit ] %2 = bitcast %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096 to i32* - %3 = load i32* %2, align 4, !tbaa !10 + %3 = load i32, i32* %2, align 4, !tbaa !10 %tobool.i59 = icmp eq i32 %3, 0 br i1 %tobool.i59, label %cond.false21, label %_ZNK4llvm9Attribute15isEnumAttributeEv.exit diff --git a/llvm/test/Analysis/ScalarEvolution/scev-aa.ll b/llvm/test/Analysis/ScalarEvolution/scev-aa.ll index 9a3b9cd228e9..e2123f44f167 100644 --- a/llvm/test/Analysis/ScalarEvolution/scev-aa.ll +++ b/llvm/test/Analysis/ScalarEvolution/scev-aa.ll @@ -22,8 +22,8 @@ bb: %pi = getelementptr double, double* %p, i64 %i %i.next = add i64 %i, 1 %pi.next = getelementptr double, double* %p, i64 %i.next - %x = load double* %pi - %y = load double* %pi.next + %x = load double, double* %pi + %y = load double, double* %pi.next %z = fmul double %x, %y store double %z, double* %pi %exitcond = icmp eq i64 %i.next, %n @@ -61,15 +61,15 @@ bb: %pi.j = getelementptr double, double* %p, i64 %e %f = add i64 %i.next, %j %pi.next.j = getelementptr double, double* %p, i64 %f - %x = load double* %pi.j - %y = load double* %pi.next.j + %x = load double, double* %pi.j + %y = load double, double* %pi.next.j %z = fmul double %x, %y store double %z, double* %pi.j %o = add i64 %j, 91 %g = add i64 %i, %o %pi.j.next = getelementptr double, double* %p, i64 %g - %a = load double* %pi.j.next + %a = load double, double* %pi.j.next %b = fmul double %x, %a store double %b, double* %pi.j.next @@ -118,15 +118,15 @@ bb: %pi.j = getelementptr double, double* %p, i64 %e %f = add i64 %i.next, %j %pi.next.j = getelementptr double, double* %p, i64 %f - %x = load double* %pi.j - %y = load double* %pi.next.j + %x = load double, double* %pi.j + %y = load double, double* %pi.next.j %z = fmul double %x, %y store double %z, double* %pi.j %o = add i64 %j, %n %g = add i64 %i, %o %pi.j.next = getelementptr double, double* %p, i64 %g - %a = load double* %pi.j.next + %a = load double, double* %pi.j.next %b = fmul double %x, %a store double %b, double* %pi.j.next @@ -202,7 +202,7 @@ for.body: ; preds = %entry, %for.body %inc = add nsw i64 %i, 1 ; [#uses=2] %arrayidx = getelementptr inbounds i64, i64* %p, i64 %inc store i64 0, i64* %arrayidx - %tmp6 = load i64* %p ; [#uses=1] + %tmp6 = load i64, i64* %p ; [#uses=1] %cmp = icmp slt i64 %inc, %tmp6 ; [#uses=1] br i1 %cmp, label %for.body, label %for.end diff --git a/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll b/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll index 3ca32bdd9a17..77f3482f03c0 100644 --- a/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll +++ b/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll @@ -66,7 +66,7 @@ define i64 @bad.1(i32 %start, i32 %low.limit, i32 %high.limit, i1* %unknown) { br i1 %break.early, label %continue.1, label %early.exit continue.1: - %cond = load volatile i1* %unknown + %cond = load volatile i1, i1* %unknown %idx.inc = add nsw i32 %idx, 1 br i1 %cond, label %loop, label %continue diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll 
b/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll index f5d54556d24b..47f02715c0da 100644 --- a/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll +++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll @@ -24,7 +24,7 @@ bb1: ; preds = %bb1, %bb1.thread ; CHECK: %2 ; CHECK-NEXT: --> {-128,+,1}<%bb1> Exits: 127 %3 = getelementptr double, double* %x, i64 %2 ; [#uses=1] - %4 = load double* %3, align 8 ; [#uses=1] + %4 = load double, double* %3, align 8 ; [#uses=1] %5 = fmul double %4, 3.900000e+00 ; [#uses=1] %6 = sext i8 %0 to i64 ; [#uses=1] %7 = getelementptr double, double* %x, i64 %6 ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll index 07f055e4d172..575b744a1a40 100644 --- a/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll +++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll @@ -24,7 +24,7 @@ bb1: ; preds = %bb1, %bb1.thread %1 = trunc i64 %i.0.reg2mem.0 to i9 ; [#uses=1] %2 = sext i9 %1 to i64 ; [#uses=1] %3 = getelementptr double, double* %x, i64 %2 ; [#uses=1] - %4 = load double* %3, align 8 ; [#uses=1] + %4 = load double, double* %3, align 8 ; [#uses=1] %5 = fmul double %4, 3.900000e+00 ; [#uses=1] %6 = sext i7 %0 to i64 ; [#uses=1] %7 = getelementptr double, double* %x, i64 %6 ; [#uses=1] @@ -47,7 +47,7 @@ bb1: ; preds = %bb1, %bb1.thread %1 = trunc i64 %i.0.reg2mem.0 to i9 ; [#uses=1] %2 = sext i9 %1 to i64 ; [#uses=1] %3 = getelementptr double, double* %x, i64 %2 ; [#uses=1] - %4 = load double* %3, align 8 ; [#uses=1] + %4 = load double, double* %3, align 8 ; [#uses=1] %5 = fmul double %4, 3.900000e+00 ; [#uses=1] %6 = sext i8 %0 to i64 ; [#uses=1] %7 = getelementptr double, double* %x, i64 %6 ; [#uses=1] @@ -70,7 +70,7 @@ bb1: ; preds = %bb1, %bb1.thread %1 = trunc i64 %i.0.reg2mem.0 to i9 ; [#uses=1] %2 = sext i9 %1 to i64 ; [#uses=1] %3 = getelementptr double, double* %x, i64 %2 ; [#uses=1] - %4 = load double* %3, align 8 ; [#uses=1] + %4 = load double, double* %3, align 8 ; [#uses=1] %5 = fmul double %4, 3.900000e+00 ; [#uses=1] %6 = sext i8 %0 to i64 ; [#uses=1] %7 = getelementptr double, double* %x, i64 %6 ; [#uses=1] @@ -93,7 +93,7 @@ bb1: ; preds = %bb1, %bb1.thread %1 = trunc i64 %i.0.reg2mem.0 to i9 ; [#uses=1] %2 = sext i9 %1 to i64 ; [#uses=1] %3 = getelementptr double, double* %x, i64 %2 ; [#uses=1] - %4 = load double* %3, align 8 ; [#uses=1] + %4 = load double, double* %3, align 8 ; [#uses=1] %5 = fmul double %4, 3.900000e+00 ; [#uses=1] %6 = sext i8 %0 to i64 ; [#uses=1] %7 = getelementptr double, double* %x, i64 %6 ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll index e580cc18d981..6e075739b439 100644 --- a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll +++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll @@ -56,7 +56,7 @@ bb4.bb5_crit_edge: ; preds = %bb4 br label %bb5 bb5: ; preds = %bb4.bb5_crit_edge, %entry - %tmp12 = load i32* getelementptr ([32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; [#uses=1] + %tmp12 = load i32, i32* getelementptr ([32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; [#uses=1] %tmp13 = icmp eq i32 %tmp12, -1116 ; [#uses=1] br i1 %tmp13, label %bb7, label %bb6 diff --git a/llvm/test/Analysis/ScalarEvolution/sle.ll b/llvm/test/Analysis/ScalarEvolution/sle.ll index c31f750cddb2..f24c4807114f 100644 --- a/llvm/test/Analysis/ScalarEvolution/sle.ll +++ b/llvm/test/Analysis/ScalarEvolution/sle.ll @@ -15,7 +15,7 @@ entry: for.body: ; preds = %for.body, %entry %i = phi i64 [ 
%i.next, %for.body ], [ 0, %entry ] ; [#uses=2] %arrayidx = getelementptr double, double* %p, i64 %i ; [#uses=2] - %t4 = load double* %arrayidx ; [#uses=1] + %t4 = load double, double* %arrayidx ; [#uses=1] %mul = fmul double %t4, 2.200000e+00 ; [#uses=1] store double %mul, double* %arrayidx %i.next = add nsw i64 %i, 1 ; [#uses=2] diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count11.ll b/llvm/test/Analysis/ScalarEvolution/trip-count11.ll index 3faa95176e7c..b0a2c4094a5c 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count11.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count11.ll @@ -21,7 +21,7 @@ for.cond: ; preds = %for.inc, %entry for.inc: ; preds = %for.cond %idxprom = sext i32 %i.0 to i64 %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @foo.a, i64 0, i64 %idxprom - %0 = load i32* %arrayidx, align 4 + %0 = load i32, i32* %arrayidx, align 4 %add = add nsw i32 %sum.0, %0 %inc = add nsw i32 %i.0, 1 br label %for.cond @@ -44,7 +44,7 @@ for.cond: ; preds = %for.inc, %entry for.inc: ; preds = %for.cond %idxprom = sext i32 %i.0 to i64 %arrayidx = getelementptr inbounds [8 x i32], [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom - %0 = load i32 addrspace(1)* %arrayidx, align 4 + %0 = load i32, i32 addrspace(1)* %arrayidx, align 4 %add = add nsw i32 %sum.0, %0 %inc = add nsw i32 %i.0, 1 br label %for.cond diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count12.ll b/llvm/test/Analysis/ScalarEvolution/trip-count12.ll index 3fd16b23df64..d0086ee2e6ac 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count12.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count12.ll @@ -17,7 +17,7 @@ for.body: ; preds = %for.body, %for.body %len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %for.body.preheader ] %res.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ] %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i32 1 - %0 = load i16* %p.addr.05, align 2 + %0 = load i16, i16* %p.addr.05, align 2 %conv = zext i16 %0 to i32 %add = add i32 %conv, %res.03 %sub = add nsw i32 %len.addr.04, -2 diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count4.ll b/llvm/test/Analysis/ScalarEvolution/trip-count4.ll index 6c1ed89989b4..966ffd26fd28 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count4.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count4.ll @@ -13,7 +13,7 @@ loop: ; preds = %loop, %entry %s0 = shl i64 %indvar, 8 ; [#uses=1] %indvar.i8 = ashr i64 %s0, 8 ; [#uses=1] %t0 = getelementptr double, double* %d, i64 %indvar.i8 ; [#uses=2] - %t1 = load double* %t0 ; [#uses=1] + %t1 = load double, double* %t0 ; [#uses=1] %t2 = fmul double %t1, 1.000000e-01 ; [#uses=1] store double %t2, double* %t0 %indvar.next = sub i64 %indvar, 1 ; [#uses=2] diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count5.ll b/llvm/test/Analysis/ScalarEvolution/trip-count5.ll index 564a75a74586..dc02fedd1342 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count5.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count5.ll @@ -9,7 +9,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 define float @t(float* %pTmp1, float* %peakWeight, float* %nrgReducePeakrate, i32 %bim) nounwind { entry: - %tmp3 = load float* %peakWeight, align 4 ; [#uses=2] + %tmp3 = load float, float* %peakWeight, align 4 ; [#uses=2] %tmp2538 = icmp sgt i32 %bim, 0 ; [#uses=1] br i1 %tmp2538, label %bb.nph, label %bb4 @@ -22,12 +22,12 @@ bb: ; preds = %bb1, %bb.nph %peakCount.034 = phi float [ %tmp19, %bb1 ], [ %tmp3, %bb.nph ] ; [#uses=1] %tmp6 = sext i32 %hiPart.035 
to i64 ; [#uses=1] %tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; [#uses=1] - %tmp8 = load float* %tmp7, align 4 ; [#uses=1] + %tmp8 = load float, float* %tmp7, align 4 ; [#uses=1] %tmp10 = fadd float %tmp8, %distERBhi.036 ; [#uses=3] %tmp12 = add i32 %hiPart.035, 1 ; [#uses=3] %tmp15 = sext i32 %tmp12 to i64 ; [#uses=1] %tmp16 = getelementptr float, float* %peakWeight, i64 %tmp15 ; [#uses=1] - %tmp17 = load float* %tmp16, align 4 ; [#uses=1] + %tmp17 = load float, float* %tmp16, align 4 ; [#uses=1] %tmp19 = fadd float %tmp17, %peakCount.034 ; [#uses=2] br label %bb1 diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count6.ll b/llvm/test/Analysis/ScalarEvolution/trip-count6.ll index 9cba1101a6f0..7980bbdcb121 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count6.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count6.ll @@ -13,7 +13,7 @@ bb: ; preds = %bb4, %entry %mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; [#uses=4] zext i8 %mode.0 to i32 ; :1 [#uses=1] getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; :2 [#uses=1] - load i32* %2, align 4 ; :3 [#uses=1] + load i32, i32* %2, align 4 ; :3 [#uses=1] icmp eq i32 %3, %0 ; :4 [#uses=1] br i1 %4, label %bb1, label %bb2 diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count7.ll b/llvm/test/Analysis/ScalarEvolution/trip-count7.ll index a4eb72f07377..bbe76c490cdd 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-count7.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-count7.ll @@ -73,7 +73,7 @@ bb.i: ; preds = %bb7.i store i32 0, i32* %q, align 4 %tmp1 = sext i32 %tmp to i64 ; [#uses=1] %tmp2 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp1 ; [#uses=1] - %tmp3 = load i32* %tmp2, align 4 ; [#uses=1] + %tmp3 = load i32, i32* %tmp2, align 4 ; [#uses=1] %tmp4 = icmp eq i32 %tmp3, 0 ; [#uses=1] br i1 %tmp4, label %bb.i.bb7.i.backedge_crit_edge, label %bb1.i @@ -81,7 +81,7 @@ bb1.i: ; preds = %bb.i %tmp5 = add i32 %j.0.i, 2 ; [#uses=1] %tmp6 = sext i32 %tmp5 to i64 ; [#uses=1] %tmp7 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp6 ; [#uses=1] - %tmp8 = load i32* %tmp7, align 4 ; [#uses=1] + %tmp8 = load i32, i32* %tmp7, align 4 ; [#uses=1] %tmp9 = icmp eq i32 %tmp8, 0 ; [#uses=1] br i1 %tmp9, label %bb1.i.bb7.i.backedge_crit_edge, label %bb2.i @@ -89,7 +89,7 @@ bb2.i: ; preds = %bb1.i %tmp10 = sub i32 7, %j.0.i ; [#uses=1] %tmp11 = sext i32 %tmp10 to i64 ; [#uses=1] %tmp12 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp11 ; [#uses=1] - %tmp13 = load i32* %tmp12, align 4 ; [#uses=1] + %tmp13 = load i32, i32* %tmp12, align 4 ; [#uses=1] %tmp14 = icmp eq i32 %tmp13, 0 ; [#uses=1] br i1 %tmp14, label %bb2.i.bb7.i.backedge_crit_edge, label %bb3.i @@ -108,7 +108,7 @@ bb3.i: ; preds = %bb2.i %tmp23 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp22 ; [#uses=1] store i32 0, i32* %tmp23, align 4 call void @Try(i32 2, i32* %q, i32* %b9, i32* %a10, i32* %c11, i32* %x1.sub) nounwind - %tmp24 = load i32* %q, align 4 ; [#uses=1] + %tmp24 = load i32, i32* %q, align 4 ; [#uses=1] %tmp25 = icmp eq i32 %tmp24, 0 ; [#uses=1] br i1 %tmp25, label %bb5.i, label %bb3.i.bb7.i.backedge_crit_edge @@ -131,7 +131,7 @@ bb7.i.backedge: ; preds = %bb3.i.bb7.i.backedge_crit_edge, %bb2.i.bb7.i.backedg bb7.i: ; preds = %bb7.i.backedge, %newFuncRoot %j.0.i = phi i32 [ 0, %newFuncRoot ], [ %tmp, %bb7.i.backedge ] ; [#uses=8] - %tmp34 = load i32* %q, align 4 ; [#uses=1] + %tmp34 = load i32, i32* %q, align 4 ; [#uses=1] %tmp35 = icmp eq i32 %tmp34, 0 ; [#uses=1] %tmp36 = icmp ne i32 %j.0.i, 8 ; 
[#uses=1] %tmp37 = and i1 %tmp35, %tmp36 ; [#uses=1] diff --git a/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll b/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll index 43698204a722..9201ffca44e0 100644 --- a/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll +++ b/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll @@ -15,16 +15,16 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK-LABEL: foo define i32 @foo() { entry: - %.pr = load i32* @b, align 4 + %.pr = load i32, i32* @b, align 4 %cmp10 = icmp slt i32 %.pr, 1 br i1 %cmp10, label %for.cond1.preheader.lr.ph, label %entry.for.end9_crit_edge entry.for.end9_crit_edge: ; preds = %entry - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %for.end9 for.cond1.preheader.lr.ph: ; preds = %entry - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %tobool = icmp eq i32 %0, 0 br i1 %tobool, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge, label %return.loopexit.split diff --git a/llvm/test/Analysis/ScopedNoAliasAA/basic-domains.ll b/llvm/test/Analysis/ScopedNoAliasAA/basic-domains.ll index 1cb69a0cd336..c2b5bbd4e067 100644 --- a/llvm/test/Analysis/ScopedNoAliasAA/basic-domains.ll +++ b/llvm/test/Analysis/ScopedNoAliasAA/basic-domains.ll @@ -5,15 +5,15 @@ target triple = "x86_64-unknown-linux-gnu" define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 { entry: ; CHECK-LABEL: Function: foo1 - %0 = load float* %c, align 4, !alias.scope !9 + %0 = load float, float* %c, align 4, !alias.scope !9 %arrayidx.i = getelementptr inbounds float, float* %a, i64 5 store float %0, float* %arrayidx.i, align 4, !noalias !6 - %1 = load float* %c, align 4, !alias.scope !5 + %1 = load float, float* %c, align 4, !alias.scope !5 %arrayidx.i2 = getelementptr inbounds float, float* %a, i64 15 store float %1, float* %arrayidx.i2, align 4, !noalias !6 - %2 = load float* %c, align 4, !alias.scope !6 + %2 = load float, float* %c, align 4, !alias.scope !6 %arrayidx.i3 = getelementptr inbounds float, float* %a, i64 16 store float %2, float* %arrayidx.i3, align 4, !noalias !5 @@ -42,15 +42,15 @@ attributes #0 = { nounwind uwtable } ; A list of scopes from both domains. 
!9 = !{!2, !4, !7} -; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 -; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 -; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 -; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 -; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 -; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 -; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 -; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 -; CHECK: MayAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 +; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 +; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 +; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 +; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 +; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 +; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 +; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 +; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 +; CHECK: MayAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7 ; CHECK: NoAlias: store float %1, float* %arrayidx.i2, align 4, !noalias !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 ; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6 ; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6 diff --git a/llvm/test/Analysis/ScopedNoAliasAA/basic.ll b/llvm/test/Analysis/ScopedNoAliasAA/basic.ll index cc2641338ade..26258341cc14 100644 --- a/llvm/test/Analysis/ScopedNoAliasAA/basic.ll +++ b/llvm/test/Analysis/ScopedNoAliasAA/basic.ll @@ -5,18 +5,18 @@ target triple = "x86_64-unknown-linux-gnu" define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 { entry: ; CHECK-LABEL: Function: foo1 - %0 = load float* %c, align 4, !alias.scope !1 + %0 = load float, float* %c, align 4, !alias.scope !1 %arrayidx.i = getelementptr inbounds float, float* %a, i64 5 store float %0, float* %arrayidx.i, align 4, !noalias !1 - %1 = load float* %c, align 4 + %1 = load float, float* %c, align 4 %arrayidx = getelementptr inbounds float, float* %a, i64 7 store float 
%1, float* %arrayidx, align 4 ret void -; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0 -; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4 -; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0 -; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4 +; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0 +; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4 +; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0 +; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4 ; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0 } diff --git a/llvm/test/Analysis/ScopedNoAliasAA/basic2.ll b/llvm/test/Analysis/ScopedNoAliasAA/basic2.ll index ff99f66ac1c5..a8a17e5f90bf 100644 --- a/llvm/test/Analysis/ScopedNoAliasAA/basic2.ll +++ b/llvm/test/Analysis/ScopedNoAliasAA/basic2.ll @@ -5,24 +5,24 @@ target triple = "x86_64-unknown-linux-gnu" define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 { entry: ; CHECK-LABEL: Function: foo2 - %0 = load float* %c, align 4, !alias.scope !0 + %0 = load float, float* %c, align 4, !alias.scope !0 %arrayidx.i = getelementptr inbounds float, float* %a, i64 5 store float %0, float* %arrayidx.i, align 4, !alias.scope !5, !noalias !4 %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8 store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !5 - %1 = load float* %c, align 4 + %1 = load float, float* %c, align 4 %arrayidx = getelementptr inbounds float, float* %a, i64 7 store float %1, float* %arrayidx, align 4 ret void -; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia +; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia ; CHECK: s !5 -; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali +; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali ; CHECK: as !4 -; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4 -; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5 -; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4 -; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4 +; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4 +; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5 +; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4 +; CHECK: MayAlias: %1 = load 
float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4 ; CHECK: NoAlias: store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4 <-> store float %0, float* %arrayidx.i, align ; CHECK: 4, !alias.scope !4, !noalias !5 ; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5 diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll index 920d6f56d957..15041642ee0a 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll @@ -16,11 +16,11 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 define %structA** @test(%classA* %this, i32** %p1) #0 align 2 { entry: ; CHECK-LABEL: @test -; CHECK: load i32** %p1, align 8, !tbaa -; CHECK: load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa +; CHECK: load i32*, i32** %p1, align 8, !tbaa +; CHECK: load i32*, i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa ; CHECK: call void @callee - %0 = load i32** %p1, align 8, !tbaa !1 - %1 = load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5 + %0 = load i32*, i32** %p1, align 8, !tbaa !1 + %1 = load i32*, i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5 call void @callee(i32* %0, i32* %1) unreachable } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll index 10da13a8d460..93c34f9503ce 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll @@ -5,9 +5,9 @@ ; CHECK: @test0_yes ; CHECK: add i8 %x, %x define i8 @test0_yes(i8* %a, i8* %b) nounwind { - %x = load i8* %a, !tbaa !1 + %x = load i8, i8* %a, !tbaa !1 store i8 0, i8* %b, !tbaa !2 - %y = load i8* %a, !tbaa !1 + %y = load i8, i8* %a, !tbaa !1 %z = add i8 %x, %y ret i8 %z } @@ -15,9 +15,9 @@ define i8 @test0_yes(i8* %a, i8* %b) nounwind { ; CHECK: @test0_no ; CHECK: add i8 %x, %y define i8 @test0_no(i8* %a, i8* %b) nounwind { - %x = load i8* %a, !tbaa !3 + %x = load i8, i8* %a, !tbaa !3 store i8 0, i8* %b, !tbaa !4 - %y = load i8* %a, !tbaa !3 + %y = load i8, i8* %a, !tbaa !3 %z = add i8 %x, %y ret i8 %z } @@ -27,9 +27,9 @@ define i8 @test0_no(i8* %a, i8* %b) nounwind { ; CHECK: @test1_yes ; CHECK: add i8 %x, %x define i8 @test1_yes(i8* %a, i8* %b) nounwind { - %x = load i8* %a, !tbaa !5 + %x = load i8, i8* %a, !tbaa !5 store i8 0, i8* %b - %y = load i8* %a, !tbaa !5 + %y = load i8, i8* %a, !tbaa !5 %z = add i8 %x, %y ret i8 %z } @@ -37,9 +37,9 @@ define i8 @test1_yes(i8* %a, i8* %b) nounwind { ; CHECK: @test1_no ; CHECK: add i8 %x, %y define i8 @test1_no(i8* %a, i8* %b) nounwind { - %x = load i8* %a, !tbaa !6 + %x = load i8, i8* %a, !tbaa !6 store i8 0, i8* %b - %y = load i8* %a, !tbaa !6 + %y = load i8, i8* %a, !tbaa !6 %z = add i8 %x, %y ret i8 %z } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll index 31f775e04665..a7987f7d7f06 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll @@ -6,8 +6,8 @@ target datalayout = "E-p:64:64:64" ; CHECK-NOT: alloca define internal i32 @test(i32* %X, i32* %Y, i32* %Q) { store i32 77, i32* %Q, 
!tbaa !2 - %A = load i32* %X, !tbaa !1 - %B = load i32* %Y, !tbaa !1 + %A = load i32, i32* %X, !tbaa !1 + %B = load i32, i32* %Y, !tbaa !1 %C = add i32 %A, %B ret i32 %C } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/dse.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/dse.ll index 09f8feb610a9..b6dc9b298eb0 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/dse.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/dse.ll @@ -4,47 +4,47 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" ; DSE should make use of TBAA. ; CHECK: @test0_yes -; CHECK-NEXT: load i8* %b +; CHECK-NEXT: load i8, i8* %b ; CHECK-NEXT: store i8 1, i8* %a ; CHECK-NEXT: ret i8 %y define i8 @test0_yes(i8* %a, i8* %b) nounwind { store i8 0, i8* %a, !tbaa !1 - %y = load i8* %b, !tbaa !2 + %y = load i8, i8* %b, !tbaa !2 store i8 1, i8* %a, !tbaa !1 ret i8 %y } ; CHECK: @test0_no ; CHECK-NEXT: store i8 0, i8* %a -; CHECK-NEXT: load i8* %b +; CHECK-NEXT: load i8, i8* %b ; CHECK-NEXT: store i8 1, i8* %a ; CHECK-NEXT: ret i8 %y define i8 @test0_no(i8* %a, i8* %b) nounwind { store i8 0, i8* %a, !tbaa !3 - %y = load i8* %b, !tbaa !4 + %y = load i8, i8* %b, !tbaa !4 store i8 1, i8* %a, !tbaa !3 ret i8 %y } ; CHECK: @test1_yes -; CHECK-NEXT: load i8* %b +; CHECK-NEXT: load i8, i8* %b ; CHECK-NEXT: store i8 1, i8* %a ; CHECK-NEXT: ret i8 %y define i8 @test1_yes(i8* %a, i8* %b) nounwind { store i8 0, i8* %a - %y = load i8* %b, !tbaa !5 + %y = load i8, i8* %b, !tbaa !5 store i8 1, i8* %a ret i8 %y } ; CHECK: @test1_no ; CHECK-NEXT: store i8 0, i8* %a -; CHECK-NEXT: load i8* %b +; CHECK-NEXT: load i8, i8* %b ; CHECK-NEXT: store i8 1, i8* %a ; CHECK-NEXT: ret i8 %y define i8 @test1_no(i8* %a, i8* %b) nounwind { store i8 0, i8* %a - %y = load i8* %b, !tbaa !6 + %y = load i8, i8* %b, !tbaa !6 store i8 1, i8* %a ret i8 %y } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll index 293e96e2a046..afc83c9f4f57 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll @@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; CHECK: for.end: ; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1 -; CHECK: %tmp32 = load i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]] +; CHECK: %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]] define void @vrlh(%union.vector_t* %va, %union.vector_t* %vb, %union.vector_t* %vd) nounwind { entry: @@ -26,21 +26,21 @@ for.body: ; preds = %entry, %for.body %idxprom = sext i32 %sub to i64 %half = bitcast %union.vector_t* %vb to [8 x i16]* %arrayidx = getelementptr inbounds [8 x i16], [8 x i16]* %half, i64 0, i64 %idxprom - %tmp4 = load i16* %arrayidx, align 2, !tbaa !0 + %tmp4 = load i16, i16* %arrayidx, align 2, !tbaa !0 %conv = zext i16 %tmp4 to i32 %and = and i32 %conv, 15 %sub6 = sub nsw i32 7, %i.01 %idxprom7 = sext i32 %sub6 to i64 %half9 = bitcast %union.vector_t* %va to [8 x i16]* %arrayidx10 = getelementptr inbounds [8 x i16], [8 x i16]* %half9, i64 0, i64 %idxprom7 - %tmp11 = load i16* %arrayidx10, align 2, !tbaa !0 + %tmp11 = load i16, i16* %arrayidx10, align 2, !tbaa !0 %conv12 = zext i16 %tmp11 to i32 %shl = shl i32 %conv12, %and %sub15 = sub nsw i32 7, %i.01 %idxprom16 = sext i32 %sub15 to i64 %half18 = bitcast %union.vector_t* %va to [8 x i16]* %arrayidx19 = getelementptr inbounds [8 x i16], [8 x i16]* %half18, i64 0, 
i64 %idxprom16 - %tmp20 = load i16* %arrayidx19, align 2, !tbaa !0 + %tmp20 = load i16, i16* %arrayidx19, align 2, !tbaa !0 %conv21 = zext i16 %tmp20 to i32 %sub23 = sub nsw i32 16, %and %shr = lshr i32 %conv21, %sub23 @@ -57,11 +57,11 @@ for.body: ; preds = %entry, %for.body for.end: ; preds = %for.body %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1 - %tmp32 = load i64* %arrayidx31, align 8, !tbaa !3 + %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa !3 %arrayidx35 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 1 store i64 %tmp32, i64* %arrayidx35, align 8, !tbaa !3 %arrayidx37 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 0 - %tmp38 = load i64* %arrayidx37, align 8, !tbaa !3 + %tmp38 = load i64, i64* %arrayidx37, align 8, !tbaa !3 %arrayidx41 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 0 store i64 %tmp38, i64* %arrayidx41, align 8, !tbaa !3 ret void @@ -82,7 +82,7 @@ entry: for.body: ; preds = %entry, %for.body %i2.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ] %f = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i2.01, i32 1 - %tmp6 = load float* %f, align 4, !tbaa !5 + %tmp6 = load float, float* %f, align 4, !tbaa !5 %mul = fmul float %tmp6, 0x40019999A0000000 store float %mul, float* %f, align 4, !tbaa !5 %inc = add nsw i64 %i2.01, 1 @@ -91,7 +91,7 @@ for.body: ; preds = %entry, %for.body for.end: ; preds = %for.body %i9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0 - %tmp10 = load i32* %i9, align 4, !tbaa !4 + %tmp10 = load i32, i32* %i9, align 4, !tbaa !4 ret i32 %tmp10 } @@ -110,7 +110,7 @@ entry: for.body: ; preds = %entry, %for.body %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ] %i5 = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i.01, i32 0 - %tmp6 = load i32* %i5, align 4, !tbaa !4 + %tmp6 = load i32, i32* %i5, align 4, !tbaa !4 %mul = mul nsw i32 %tmp6, 3 store i32 %mul, i32* %i5, align 4, !tbaa !4 %inc = add nsw i64 %i.01, 1 @@ -119,7 +119,7 @@ for.body: ; preds = %entry, %for.body for.end: ; preds = %for.body %f9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1 - %tmp10 = load float* %f9, align 4, !tbaa !5 + %tmp10 = load float, float* %f9, align 4, !tbaa !5 ret float %tmp10 } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll index edea6d02800d..aaa43a460900 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll @@ -17,7 +17,7 @@ entry: br i1 %c, label %if.else, label %if.then if.then: - %t = load i32* %p, !tbaa !1 + %t = load i32, i32* %p, !tbaa !1 store i32 %t, i32* %q ret void @@ -32,11 +32,11 @@ if.else: ; CHECK: @watch_out_for_type_change ; CHECK: if.then: -; CHECK: %t = load i32* %p +; CHECK: %t = load i32, i32* %p ; CHECK: store i32 %t, i32* %q ; CHECK: ret void ; CHECK: if.else: -; CHECK: %u = load i32* %p +; CHECK: %u = load i32, i32* %p ; CHECK: store i32 %u, i32* %q define void @watch_out_for_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind { @@ -46,12 +46,12 @@ entry: br i1 %c, label %if.else, label %if.then if.then: - %t = load i32* %p, !tbaa !3 + %t = load i32, i32* %p, !tbaa !3 store i32 %t, i32* %q ret void if.else: - %u = load i32* %p, !tbaa !4 + %u = load i32, i32* %p, !tbaa !4 store i32 %u, i32* %q ret void } @@ -64,7 +64,7 @@ 
if.else: ; CHECK: store i32 0, i32* %q ; CHECK: ret void ; CHECK: if.else: -; CHECK: %u = load i32* %p +; CHECK: %u = load i32, i32* %p ; CHECK: store i32 %u, i32* %q define void @watch_out_for_another_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind { @@ -74,12 +74,12 @@ entry: br i1 %c, label %if.else, label %if.then if.then: - %t = load i32* %p, !tbaa !4 + %t = load i32, i32* %p, !tbaa !4 store i32 %t, i32* %q ret void if.else: - %u = load i32* %p, !tbaa !3 + %u = load i32, i32* %p, !tbaa !3 store i32 %u, i32* %q ret void } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/licm.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/licm.ll index 150be838f998..fe07730577e6 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/licm.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/licm.ll @@ -5,7 +5,7 @@ ; CHECK: @foo ; CHECK: entry: -; CHECK-NEXT: %tmp3 = load double** @P, !tbaa !0 +; CHECK-NEXT: %tmp3 = load double*, double** @P, !tbaa !0 ; CHECK-NEXT: br label %for.body @P = common global double* null @@ -16,9 +16,9 @@ entry: for.body: ; preds = %entry, %for.body %i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ] - %tmp3 = load double** @P, !tbaa !1 + %tmp3 = load double*, double** @P, !tbaa !1 %scevgep = getelementptr double, double* %tmp3, i64 %i.07 - %tmp4 = load double* %scevgep, !tbaa !2 + %tmp4 = load double, double* %scevgep, !tbaa !2 %mul = fmul double %tmp4, 2.300000e+00 store double %mul, double* %scevgep, !tbaa !2 %inc = add i64 %i.07, 1 @@ -49,9 +49,9 @@ entry: br label %loop loop: - %tmp51 = load i8** %p, !tbaa !4 + %tmp51 = load i8*, i8** %p, !tbaa !4 store i8* %tmp51, i8** %p - %tmp40 = load i8* %q, !tbaa !5 + %tmp40 = load i8, i8* %q, !tbaa !5 store i8 %tmp40, i8* %q br label %loop } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll index 6d775b48d777..aa91020f12d6 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll @@ -33,20 +33,20 @@ entry: %call = call noalias i8* @_Znwm(i64 8) %0 = bitcast i8* %call to %struct.Foo* store %struct.Foo* %0, %struct.Foo** %f, align 8, !tbaa !4 - %1 = load %struct.Foo** %f, align 8, !tbaa !4 + %1 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4 %i = getelementptr inbounds %struct.Foo, %struct.Foo* %1, i32 0, i32 0 store i64 1, i64* %i, align 8, !tbaa !6 store i32 0, i32* %i1, align 4, !tbaa !0 br label %for.cond for.cond: - %2 = load i32* %i1, align 4, !tbaa !0 - %3 = load i32* %n.addr, align 4, !tbaa !0 + %2 = load i32, i32* %i1, align 4, !tbaa !0 + %3 = load i32, i32* %n.addr, align 4, !tbaa !0 %cmp = icmp slt i32 %2, %3 br i1 %cmp, label %for.body, label %for.end for.body: - %4 = load %struct.Foo** %f, align 8, !tbaa !4 + %4 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4 %5 = bitcast %struct.Foo* %4 to i8* %new.isnull = icmp eq i8* %5, null br i1 %new.isnull, label %new.cont, label %new.notnull @@ -58,10 +58,10 @@ new.notnull: new.cont: %7 = phi %struct.Bar* [ %6, %new.notnull ], [ null, %for.body ] store %struct.Bar* %7, %struct.Bar** %b, align 8, !tbaa !4 - %8 = load %struct.Bar** %b, align 8, !tbaa !4 + %8 = load %struct.Bar*, %struct.Bar** %b, align 8, !tbaa !4 %p = getelementptr inbounds %struct.Bar, %struct.Bar* %8, i32 0, i32 0 store i8* null, i8** %p, align 8, !tbaa !9 - %9 = load %struct.Foo** %f, align 8, !tbaa !4 + %9 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4 %10 = bitcast %struct.Foo* %9 to i8* %new.isnull2 = icmp 
eq i8* %10, null br i1 %new.isnull2, label %new.cont4, label %new.notnull3 @@ -73,23 +73,23 @@ new.notnull3: new.cont4: %12 = phi %struct.Foo* [ %11, %new.notnull3 ], [ null, %new.cont ] store %struct.Foo* %12, %struct.Foo** %f, align 8, !tbaa !4 - %13 = load i32* %i1, align 4, !tbaa !0 + %13 = load i32, i32* %i1, align 4, !tbaa !0 %conv = sext i32 %13 to i64 - %14 = load %struct.Foo** %f, align 8, !tbaa !4 + %14 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4 %i5 = getelementptr inbounds %struct.Foo, %struct.Foo* %14, i32 0, i32 0 store i64 %conv, i64* %i5, align 8, !tbaa !6 br label %for.inc for.inc: - %15 = load i32* %i1, align 4, !tbaa !0 + %15 = load i32, i32* %i1, align 4, !tbaa !0 %inc = add nsw i32 %15, 1 store i32 %inc, i32* %i1, align 4, !tbaa !0 br label %for.cond for.end: - %16 = load %struct.Foo** %f, align 8, !tbaa !4 + %16 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4 %i6 = getelementptr inbounds %struct.Foo, %struct.Foo* %16, i32 0, i32 0 - %17 = load i64* %i6, align 8, !tbaa !6 + %17 = load i64, i64* %i6, align 8, !tbaa !6 ret i64 %17 } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/precedence.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/precedence.ll index e50021befc4f..b2931cac75c8 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/precedence.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/precedence.ll @@ -18,7 +18,7 @@ entry: store i32 0, i32* %x, !tbaa !0 %0 = bitcast i32* %x to float* store float 0x4002666660000000, float* %0, !tbaa !3 - %tmp3 = load i32* %x, !tbaa !0 + %tmp3 = load i32, i32* %x, !tbaa !0 ret i32 %tmp3 } @@ -35,7 +35,7 @@ entry: %0 = bitcast i64* %x to i8* %1 = getelementptr i8, i8* %0, i64 1 store i8 1, i8* %1, !tbaa !5 - %tmp3 = load i64* %x, !tbaa !4 + %tmp3 = load i64, i64* %x, !tbaa !4 ret i64 %tmp3 } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/sink.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/sink.ll index 1a124b86083b..c95dc151ed31 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/sink.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/sink.ll @@ -1,11 +1,11 @@ ; RUN: opt -tbaa -sink -S < %s | FileCheck %s ; CHECK: a: -; CHECK: %f = load float* %p, !tbaa [[TAGA:!.*]] +; CHECK: %f = load float, float* %p, !tbaa [[TAGA:!.*]] ; CHECK: store float %f, float* %q define void @foo(float* %p, i1 %c, float* %q, float* %r) { - %f = load float* %p, !tbaa !0 + %f = load float, float* %p, !tbaa !0 store float 0.0, float* %r, !tbaa !1 br i1 %c, label %a, label %b a: diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll index 107aff01ee49..a2e4dc6ddcf0 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll @@ -17,7 +17,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; OPT: %[[RET:.*]] = load i32* +; OPT: %[[RET:.*]] = load i32, i32* ; OPT: ret i32 %[[RET]] %s.addr = alloca i32*, align 8 %A.addr = alloca %struct.StructA*, align 8 @@ -25,13 +25,13 @@ entry: store i32* %s, i32** %s.addr, align 8, !tbaa !0 store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load i32** %s.addr, align 8, !tbaa !0 + %0 = load i32*, i32** %s.addr, align 8, !tbaa !0 store i32 1, i32* %0, align 4, !tbaa !6 - %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, 
%struct.StructA* %1, i32 0, i32 1 store i32 4, i32* %f32, align 4, !tbaa !8 - %2 = load i32** %s.addr, align 8, !tbaa !0 - %3 = load i32* %2, align 4, !tbaa !6 + %2 = load i32*, i32** %s.addr, align 8, !tbaa !0 + %3 = load i32, i32* %2, align 4, !tbaa !6 ret i32 %3 } @@ -51,13 +51,13 @@ entry: store i32* %s, i32** %s.addr, align 8, !tbaa !0 store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load i32** %s.addr, align 8, !tbaa !0 + %0 = load i32*, i32** %s.addr, align 8, !tbaa !0 store i32 1, i32* %0, align 4, !tbaa !6 - %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 0 store i16 4, i16* %f16, align 2, !tbaa !11 - %2 = load i32** %s.addr, align 8, !tbaa !0 - %3 = load i32* %2, align 4, !tbaa !6 + %2 = load i32*, i32** %s.addr, align 8, !tbaa !0 + %3 = load i32, i32* %2, align 4, !tbaa !6 ret i32 %3 } @@ -69,7 +69,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; OPT: %[[RET:.*]] = load i32* +; OPT: %[[RET:.*]] = load i32, i32* ; OPT: ret i32 %[[RET]] %A.addr = alloca %struct.StructA*, align 8 %B.addr = alloca %struct.StructB*, align 8 @@ -77,16 +77,16 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0 + %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0 %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1 %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1 store i32 4, i32* %f321, align 4, !tbaa !12 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f322, align 4, !tbaa !8 + %3 = load i32, i32* %f322, align 4, !tbaa !8 ret i32 %3 } @@ -106,16 +106,16 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0 + %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0 %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1 %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 0 store i16 4, i16* %f16, align 2, !tbaa !14 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f321, align 4, !tbaa !8 + %3 = load i32, i32* %f321, align 4, 
!tbaa !8 ret i32 %3 } @@ -135,15 +135,15 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0 + %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 2 store i32 4, i32* %f321, align 4, !tbaa !15 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f322, align 4, !tbaa !8 + %3 = load i32, i32* %f322, align 4, !tbaa !8 ret i32 %3 } @@ -163,16 +163,16 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0 + %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0 %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1 %f32_2 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 3 store i32 4, i32* %f32_2, align 4, !tbaa !16 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f321, align 4, !tbaa !8 + %3 = load i32, i32* %f321, align 4, !tbaa !8 ret i32 %3 } @@ -192,15 +192,15 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 1 store i32 4, i32* %f321, align 4, !tbaa !17 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f322, align 4, !tbaa !8 + %3 = load i32, i32* %f322, align 4, !tbaa !8 ret i32 %3 } @@ -220,15 +220,15 @@ entry: store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0 store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0 
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !8 - %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f16 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 0 store i16 4, i16* %f16, align 2, !tbaa !19 - %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0 + %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1 - %3 = load i32* %f321, align 4, !tbaa !8 + %3 = load i32, i32* %f321, align 4, !tbaa !8 ret i32 %3 } @@ -248,15 +248,15 @@ entry: store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0 store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !17 - %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0 + %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 1 store i32 4, i32* %f321, align 4, !tbaa !20 - %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f322 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1 - %3 = load i32* %f322, align 4, !tbaa !17 + %3 = load i32, i32* %f322, align 4, !tbaa !17 ret i32 %3 } @@ -276,15 +276,15 @@ entry: store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0 store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !17 - %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0 + %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0 %f16 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 0 store i16 4, i16* %f16, align 2, !tbaa !22 - %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0 + %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0 %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1 - %3 = load i32* %f321, align 4, !tbaa !17 + %3 = load i32, i32* %f321, align 4, !tbaa !17 ret i32 %3 } @@ -304,21 +304,21 @@ entry: store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0 store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0 + %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0 %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1 %a = getelementptr inbounds %struct.StructB, %struct.StructB* %b, i32 0, i32 1 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !23 - %1 
= load %struct.StructD** %D.addr, align 8, !tbaa !0 + %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0 %b1 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1 %a2 = getelementptr inbounds %struct.StructB, %struct.StructB* %b1, i32 0, i32 1 %f323 = getelementptr inbounds %struct.StructA, %struct.StructA* %a2, i32 0, i32 1 store i32 4, i32* %f323, align 4, !tbaa !25 - %2 = load %struct.StructC** %C.addr, align 8, !tbaa !0 + %2 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0 %b4 = getelementptr inbounds %struct.StructC, %struct.StructC* %2, i32 0, i32 1 %a5 = getelementptr inbounds %struct.StructB, %struct.StructB* %b4, i32 0, i32 1 %f326 = getelementptr inbounds %struct.StructA, %struct.StructA* %a5, i32 0, i32 1 - %3 = load i32* %f326, align 4, !tbaa !23 + %3 = load i32, i32* %f326, align 4, !tbaa !23 ret i32 %3 } @@ -330,7 +330,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; OPT: %[[RET:.*]] = load i32* +; OPT: %[[RET:.*]] = load i32, i32* ; OPT: ret i32 %[[RET]] %C.addr = alloca %struct.StructC*, align 8 %D.addr = alloca %struct.StructD*, align 8 @@ -340,24 +340,24 @@ entry: store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0 store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0 store i64 %count, i64* %count.addr, align 8, !tbaa !4 - %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0 + %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0 %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1 store %struct.StructB* %b, %struct.StructB** %b1, align 8, !tbaa !0 - %1 = load %struct.StructD** %D.addr, align 8, !tbaa !0 + %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0 %b3 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1 store %struct.StructB* %b3, %struct.StructB** %b2, align 8, !tbaa !0 - %2 = load %struct.StructB** %b1, align 8, !tbaa !0 + %2 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0 %a = getelementptr inbounds %struct.StructB, %struct.StructB* %2, i32 0, i32 1 %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1 store i32 1, i32* %f32, align 4, !tbaa !12 - %3 = load %struct.StructB** %b2, align 8, !tbaa !0 + %3 = load %struct.StructB*, %struct.StructB** %b2, align 8, !tbaa !0 %a4 = getelementptr inbounds %struct.StructB, %struct.StructB* %3, i32 0, i32 1 %f325 = getelementptr inbounds %struct.StructA, %struct.StructA* %a4, i32 0, i32 1 store i32 4, i32* %f325, align 4, !tbaa !12 - %4 = load %struct.StructB** %b1, align 8, !tbaa !0 + %4 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0 %a6 = getelementptr inbounds %struct.StructB, %struct.StructB* %4, i32 0, i32 1 %f327 = getelementptr inbounds %struct.StructA, %struct.StructA* %a6, i32 0, i32 1 - %5 = load i32* %f327, align 4, !tbaa !12 + %5 = load i32, i32* %f327, align 4, !tbaa !12 ret i32 %5 } diff --git a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll index 4ee21c5d146e..51f926549217 100644 --- a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll +++ b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll @@ -18,15 +18,15 @@ define void @test(i32 addrspace(1)* dereferenceable(8) %dparam) { ; CHECK-NOT: %nparam entry: %globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0 - %load1 = load i8* %globalptr + %load1 = load i8, i8* %globalptr 
   %alloca = alloca i1
-  %load2 = load i1* %alloca
-  %load3 = load i32 addrspace(1)* %dparam
+  %load2 = load i1, i1* %alloca
+  %load3 = load i32, i32 addrspace(1)* %dparam
   %tok = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
   %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 4, i32 4)
-  %load4 = load i32 addrspace(1)* %relocate
+  %load4 = load i32, i32 addrspace(1)* %relocate
   %nparam = getelementptr i32, i32 addrspace(1)* %dparam, i32 5
-  %load5 = load i32 addrspace(1)* %nparam
+  %load5 = load i32, i32 addrspace(1)* %nparam
   ret void
 }
diff --git a/llvm/test/Assembler/2002-04-29-NameBinding.ll b/llvm/test/Assembler/2002-04-29-NameBinding.ll
index 960209bcb5b3..c387c47ef93f 100644
--- a/llvm/test/Assembler/2002-04-29-NameBinding.ll
+++ b/llvm/test/Assembler/2002-04-29-NameBinding.ll
@@ -13,7 +13,7 @@
 define i32 @createtask() {
   %v1 = alloca i32 ;; Alloca should have one use!
-  %reg112 = load i32* %v1 ;; This load should not use the global!
+  %reg112 = load i32, i32* %v1 ;; This load should not use the global!
   ret i32 %reg112
 }
diff --git a/llvm/test/Assembler/2002-08-19-BytecodeReader.ll b/llvm/test/Assembler/2002-08-19-BytecodeReader.ll
index 6ddb01e6d241..1fd6bb6f3bee 100644
--- a/llvm/test/Assembler/2002-08-19-BytecodeReader.ll
+++ b/llvm/test/Assembler/2002-08-19-BytecodeReader.ll
@@ -11,8 +11,8 @@
 define void @Evaluate() {
   %reg1321 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 1 ; [#uses=1]
-  %reg114 = load i32* %reg1321 ; [#uses=0]
+  %reg114 = load i32, i32* %reg1321 ; [#uses=0]
   %reg1801 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 0 ; [#uses=1]
-  %reg182 = load i32* %reg1801 ; [#uses=0]
+  %reg182 = load i32, i32* %reg1801 ; [#uses=0]
   ret void
 }
diff --git a/llvm/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll b/llvm/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll
index 911f0ff71592..05b4ee2519cc 100644
--- a/llvm/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll
+++ b/llvm/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll
@@ -4,7 +4,7 @@
 @.str_1 = internal constant [6 x i8] c"_Bool\00" ; <[6 x i8]*> [#uses=2]
 define i32 @test() {
-  %tmp.54 = load i8* getelementptr ([6 x i8]* @.str_1, i64 0, i64 1) ; [#uses=1]
+  %tmp.54 = load i8, i8* getelementptr ([6 x i8]* @.str_1, i64 0, i64 1) ; [#uses=1]
   %tmp.55 = icmp ne i8 %tmp.54, 66 ; [#uses=1]
   br i1 %tmp.55, label %then.7, label %endif.7
diff --git a/llvm/test/Assembler/2004-06-07-VerifierBug.ll b/llvm/test/Assembler/2004-06-07-VerifierBug.ll
index e01cee82ab9f..2fc0ae29ca7b 100644
--- a/llvm/test/Assembler/2004-06-07-VerifierBug.ll
+++ b/llvm/test/Assembler/2004-06-07-VerifierBug.ll
@@ -7,6 +7,6 @@ entry:
 loop: ; preds = %loop
   %tmp.4.i9 = getelementptr i32, i32* null, i32 %tmp.5.i10 ; [#uses=1]
-  %tmp.5.i10 = load i32* %tmp.4.i9 ; [#uses=1]
+  %tmp.5.i10 = load i32, i32* %tmp.4.i9 ; [#uses=1]
   br label %loop
 }
diff --git a/llvm/test/Assembler/2007-01-05-Cmp-ConstExpr.ll b/llvm/test/Assembler/2007-01-05-Cmp-ConstExpr.ll
index 573d008c3659..5d1dd91ba91b 100644
--- a/llvm/test/Assembler/2007-01-05-Cmp-ConstExpr.ll
+++ b/llvm/test/Assembler/2007-01-05-Cmp-ConstExpr.ll
@@ -7,7 +7,7 @@
 define i32 @main(i32 %argc, i8** %argv) {
 entry:
   %tmp65 = getelementptr i8*, i8** %argv, i32 1 ; [#uses=1]
-  %tmp66 = load i8** %tmp65 ; [#uses=0]
+  %tmp66 = load i8*, i8** %tmp65 ; [#uses=0]
   br i1 icmp ne (i32 sub (i32 ptrtoint (i8* getelementptr ([4 x i8]* @str, i32 0, i64 1) to i32), i32 ptrtoint ([4 x i8]* @str to i32)), i32 1), label %exit_1, label %exit_2
 exit_1: ; preds = %entry
diff --git a/llvm/test/Assembler/2007-04-20-AlignedLoad.ll b/llvm/test/Assembler/2007-04-20-AlignedLoad.ll
index bcf65fd6eac7..1e8850a7073e 100644
--- a/llvm/test/Assembler/2007-04-20-AlignedLoad.ll
+++ b/llvm/test/Assembler/2007-04-20-AlignedLoad.ll
@@ -3,6 +3,6 @@
 define i32 @test(i32* %arg) {
 entry:
-  %tmp2 = load i32* %arg, align 1024 ; [#uses=1]
+  %tmp2 = load i32, i32* %arg, align 1024 ; [#uses=1]
   ret i32 %tmp2
 }
diff --git a/llvm/test/Assembler/2007-12-11-AddressSpaces.ll b/llvm/test/Assembler/2007-12-11-AddressSpaces.ll
index f860f57e8ddd..3d13f43602fb 100644
--- a/llvm/test/Assembler/2007-12-11-AddressSpaces.ll
+++ b/llvm/test/Assembler/2007-12-11-AddressSpaces.ll
@@ -12,15 +12,15 @@
 define void @foo() {
 entry:
-  %tmp1 = load i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct addrspace(42)* @input, i32 0, i32 3), align 4 ; [#uses=1]
+  %tmp1 = load i32 addrspace(33)*, i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct addrspace(42)* @input, i32 0, i32 3), align 4 ; [#uses=1]
   store i32 addrspace(33)* %tmp1, i32 addrspace(33)* addrspace(66)* getelementptr (%struct.mystruct addrspace(66)* @output, i32 0, i32 1), align 4
   ret void
 }
 define i32 addrspace(11)* @bar(i32 addrspace(11)* addrspace(22)* addrspace(33)* %x) {
 entry:
-  %tmp1 = load i32 addrspace(11)* addrspace(22)* addrspace(33)* @y, align 4 ; [#uses=2]
+  %tmp1 = load i32 addrspace(11)* addrspace(22)*, i32 addrspace(11)* addrspace(22)* addrspace(33)* @y, align 4 ; [#uses=2]
   store i32 addrspace(11)* addrspace(22)* %tmp1, i32 addrspace(11)* addrspace(22)* addrspace(33)* %x, align 4
-  %tmp5 = load i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; [#uses=1]
+  %tmp5 = load i32 addrspace(11)*, i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; [#uses=1]
   ret i32 addrspace(11)* %tmp5
 }
diff --git a/llvm/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll b/llvm/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll
index 50ad32e1c9ed..c8898fc6698c 100644
--- a/llvm/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll
+++ b/llvm/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll
@@ -15,7 +15,7 @@ define i32 @main() nounwind readonly {
 ; CHECK: call void @llvm.dbg.value(metadata i64 72,
   call void @llvm.dbg.declare(metadata i64* %diff1, metadata !0, metadata !{!"0x102"})
   store i64 72, i64* %diff1, align 8
-  %v1 = load %struct.test** @TestArrayPtr, align 8 ; <%struct.test*> [#uses=1]
+  %v1 = load %struct.test*, %struct.test** @TestArrayPtr, align 8 ; <%struct.test*> [#uses=1]
   %v2 = ptrtoint %struct.test* %v1 to i64 ; [#uses=1]
   %v3 = sub i64 %v2, ptrtoint ([10 x %struct.test]* @TestArray to i64) ; [#uses=1]
   store i64 %v3, i64* %diff1, align 8
diff --git a/llvm/test/Assembler/align-inst-load.ll b/llvm/test/Assembler/align-inst-load.ll
index 3586be2d6e03..75fcd3e5a028 100644
--- a/llvm/test/Assembler/align-inst-load.ll
+++ b/llvm/test/Assembler/align-inst-load.ll
@@ -1,6 +1,6 @@
 ; RUN: not llvm-as %s -o /dev/null 2>/dev/null
 define void @foo() {
-  load i1* %p, align 1073741824
+  load i1, i1* %p, align 1073741824
   ret void
 }
diff --git a/llvm/test/Assembler/align-inst.ll b/llvm/test/Assembler/align-inst.ll
index 1952fbc52972..028da396d2fe 100644
--- a/llvm/test/Assembler/align-inst.ll
+++ b/llvm/test/Assembler/align-inst.ll
@@ -5,7 +5,7 @@
 define void @foo() {
   %p = alloca i1, align 536870912
-  load i1* %p, align 536870912
+  load i1, i1* %p, align 536870912
   store i1 false, i1* %p, align 536870912
   ret void
 }
diff --git a/llvm/test/Assembler/atomic.ll b/llvm/test/Assembler/atomic.ll
index 0356f5fc582c..148b95d88e30 100644
--- a/llvm/test/Assembler/atomic.ll
+++ b/llvm/test/Assembler/atomic.ll
@@ -3,10 +3,10 @@
 ; Basic smoke test for atomic operations.
 define void @f(i32* %x) {
-  ; CHECK: load atomic i32* %x unordered, align 4
-  load atomic i32* %x unordered, align 4
-  ; CHECK: load atomic volatile i32* %x singlethread acquire, align 4
-  load atomic volatile i32* %x singlethread acquire, align 4
+  ; CHECK: load atomic i32, i32* %x unordered, align 4
+  load atomic i32, i32* %x unordered, align 4
+  ; CHECK: load atomic volatile i32, i32* %x singlethread acquire, align 4
+  load atomic volatile i32, i32* %x singlethread acquire, align 4
   ; CHECK: store atomic i32 3, i32* %x release, align 4
   store atomic i32 3, i32* %x release, align 4
   ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
diff --git a/llvm/test/Assembler/fast-math-flags.ll b/llvm/test/Assembler/fast-math-flags.ll
index 8e75bdf0f966..f0d3ecc761d1 100644
--- a/llvm/test/Assembler/fast-math-flags.ll
+++ b/llvm/test/Assembler/fast-math-flags.ll
@@ -9,12 +9,12 @@
 define float @none(float %x, float %y) {
 entry:
-; CHECK: %vec = load <3 x float>* @vec
-  %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
-  %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
-  %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+  %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+  %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+  %arr = load [3 x float], [3 x float]* @arr
 ; CHECK: %a = fadd float %x, %y
   %a = fadd float %x, %y
@@ -43,12 +43,12 @@ entry:
 ; CHECK: no_nan
 define float @no_nan(float %x, float %y) {
 entry:
-; CHECK: %vec = load <3 x float>* @vec
-  %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
-  %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
-  %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+  %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+  %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+  %arr = load [3 x float], [3 x float]* @arr
 ; CHECK: %a = fadd nnan float %x, %y
   %a = fadd nnan float %x, %y
@@ -77,12 +77,12 @@ entry:
 ; CHECK: no_nan_inf
 define float @no_nan_inf(float %x, float %y) {
 entry:
-; CHECK: %vec = load <3 x float>* @vec
-  %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
-  %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
-  %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+  %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+  %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+  %arr = load [3 x float], [3 x float]* @arr
 ; CHECK: %a = fadd nnan ninf float %x, %y
   %a = fadd ninf nnan float %x, %y
@@ -111,12 +111,12 @@ entry:
 ; CHECK: mixed_flags
 define float @mixed_flags(float %x, float %y) {
 entry:
-; CHECK: %vec = load <3 x float>* @vec
-  %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
-  %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
-  %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+  %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+  %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+  %arr = load [3 x float], [3 x float]* @arr
 ; CHECK: %a = fadd nnan ninf float %x, %y
   %a = fadd ninf nnan float %x, %y
diff --git a/llvm/test/Assembler/half-constprop.ll b/llvm/test/Assembler/half-constprop.ll
index c5ae3bf16abb..7ca876b3c71f 100644
--- a/llvm/test/Assembler/half-constprop.ll
+++ b/llvm/test/Assembler/half-constprop.ll
@@ -9,8 +9,8 @@ entry:
   %.compoundliteral = alloca float, align 4
   store half 0xH4200, half* %a, align 2
   store half 0xH4B9A, half* %b, align 2
-  %tmp = load half* %a, align 2
-  %tmp1 = load half* %b, align 2
+  %tmp = load half, half* %a, align 2
+  %tmp1 = load half, half* %b, align 2
   %add = fadd half %tmp, %tmp1
 ; CHECK: 0xH4C8D
   ret half %add
diff --git a/llvm/test/Assembler/half-conv.ll b/llvm/test/Assembler/half-conv.ll
index e6f73cf71ca7..657550154af4 100644
--- a/llvm/test/Assembler/half-conv.ll
+++ b/llvm/test/Assembler/half-conv.ll
@@ -7,7 +7,7 @@ entry:
   %a = alloca half, align 2
   %.compoundliteral = alloca float, align 4
   store half 0xH4C8D, half* %a, align 2
-  %tmp = load half* %a, align 2
+  %tmp = load half, half* %a, align 2
   %conv = fpext half %tmp to float
 ; CHECK: 0x4032340000000000
   ret float %conv
diff --git a/llvm/test/Assembler/insertextractvalue.ll b/llvm/test/Assembler/insertextractvalue.ll
index 692843e9910b..71dbba371d4e 100644
--- a/llvm/test/Assembler/insertextractvalue.ll
+++ b/llvm/test/Assembler/insertextractvalue.ll
@@ -8,7 +8,7 @@
 ; CHECK-NEXT: store
 ; CHECK-NEXT: ret
 define float @foo({{i32},{float, double}}* %p) nounwind {
-  %t = load {{i32},{float, double}}* %p
+  %t = load {{i32},{float, double}}, {{i32},{float, double}}* %p
   %s = extractvalue {{i32},{float, double}} %t, 1, 0
   %r = insertvalue {{i32},{float, double}} %t, double 2.0, 1, 1
   store {{i32},{float, double}} %r, {{i32},{float, double}}* %p
diff --git a/llvm/test/Assembler/invalid-load-mismatched-explicit-type.ll b/llvm/test/Assembler/invalid-load-mismatched-explicit-type.ll
new file mode 100644
index 000000000000..b8422ed730f2
--- /dev/null
+++ b/llvm/test/Assembler/invalid-load-mismatched-explicit-type.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: :4:13: error: explicit pointee type doesn't match operand's pointee type
+define void @test(i32* %t) {
+  %x = load i16, i32* %t
+  ret void
+}
diff --git a/llvm/test/Assembler/invalid-load-missing-explicit-type.ll b/llvm/test/Assembler/invalid-load-missing-explicit-type.ll
new file mode 100644
index 000000000000..455498e3d15d
--- /dev/null
+++ b/llvm/test/Assembler/invalid-load-missing-explicit-type.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: :4:18: error: expected comma after load's type
+define void @test(i32* %t) {
+  %x = load i32* %t
+  ret void
+}
diff --git a/llvm/test/Assembler/numbered-values.ll b/llvm/test/Assembler/numbered-values.ll
index 70b63779ccb6..0b14c6845ab0 100644
--- a/llvm/test/Assembler/numbered-values.ll
+++ b/llvm/test/Assembler/numbered-values.ll
@@ -9,7 +9,7 @@ entry:
   %0 = alloca i32 ; :0 [#uses=2]
   %"alloca point" = bitcast i32 0 to i32 ; [#uses=0]
   store i32 %X, i32* %X_addr
-  %1 = load i32* %X_addr, align 4 ; :1 [#uses=1]
+  %1 = load i32, i32* %X_addr, align 4 ; :1 [#uses=1]
   mul i32 %1, 4 ; :2 [#uses=1]
   %3 = add i32 %2, 123 ; :3 [#uses=1]
   store i32 %3, i32* %0, align 4
diff --git a/llvm/test/Assembler/unnamed.ll b/llvm/test/Assembler/unnamed.ll
index 099a15a254af..7f79e5c20d74 100644
--- a/llvm/test/Assembler/unnamed.ll
+++ b/llvm/test/Assembler/unnamed.ll
@@ -16,7 +16,7 @@ module asm "this is another inline asm block"
 @3 = global x86_fp80 0xK4001E000000000000000
 define float @foo(%0* %p) nounwind {
-  %t = load %0* %p ; <%0> [#uses=2]
+  %t = load %0, %0* %p ; <%0> [#uses=2]
   %s = extractvalue %0 %t, 1, 0 ; [#uses=1]
   %r = insertvalue %0 %t, double 2.000000e+00, 1, 1; <%0> [#uses=1]
   store %0 %r, %0* %p
diff --git a/llvm/test/Assembler/upgrade-loop-metadata.ll b/llvm/test/Assembler/upgrade-loop-metadata.ll
index 0852469774bf..d88cb3ee2e2c 100644
--- a/llvm/test/Assembler/upgrade-loop-metadata.ll
+++ b/llvm/test/Assembler/upgrade-loop-metadata.ll
@@ -14,7 +14,7 @@ entry:
   br label %for.cond
 for.cond: ; preds = %for.inc, %entry
-  %0 = load i32* %i, align 4
+  %0 = load i32, i32* %i, align 4
   %cmp = icmp slt i32 %0, 16
   br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
@@ -22,7 +22,7 @@ for.body: ; preds = %for.cond
   br label %for.inc
 for.inc: ; preds = %for.body
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* %i, align 4
   br label %for.cond
diff --git a/llvm/test/Assembler/uselistorder.ll b/llvm/test/Assembler/uselistorder.ll
index be5ee7000299..873e160737ce 100644
--- a/llvm/test/Assembler/uselistorder.ll
+++ b/llvm/test/Assembler/uselistorder.ll
@@ -48,7 +48,7 @@ first:
 define i1 @loada() {
 entry:
-  %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+  %a = load i1, i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
   ret i1 %a
 }
diff --git a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
index ed3981b465ac..0032c4a9b7a3 100644
--- a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
+++ b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
@@ -4,7 +4,7 @@
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK: @vclz16
-  %tmp1 = load <4 x i16>* %A
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
   %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
   ret <4 x i16> %tmp2
@@ -12,7 +12,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK: @vcnt8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
 ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
   ret <8 x i8> %tmp2
diff --git a/llvm/test/Bitcode/case-ranges-3.3.ll b/llvm/test/Bitcode/case-ranges-3.3.ll
index 020b37f49db7..eb55ef1ad21e 100644
--- a/llvm/test/Bitcode/case-ranges-3.3.ll
+++ b/llvm/test/Bitcode/case-ranges-3.3.ll
@@ -10,7 +10,7 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   store i32 %x, i32* %2, align 4
-  %3 = load i32* %2, align 4
+  %3 = load i32, i32* %2, align 4
   switch i32 %3, label %9 [
 ; CHECK: switch i32 %3, label %9
     i32 -3, label %4
@@ -63,6 +63,6 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   br label %11
 ;