
    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
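
// A minimal usage sketch (added commentary, not part of the original file):
// a spiller deciding whether `li` needs a stack slot might query these
// predicates as follows; the variable names are hypothetical.
//
//   SmallVector<LiveInterval*, 4> SpillIs; // intervals about to be spilled
//   bool isLoad = false;
//   if (LIs->isReMaterializable(li, SpillIs, isLoad)) {
//     // Every val# of li can be recomputed at its uses, so reloads can be
//     // replaced by re-executing the defining instructions; isLoad reports
//     // whether any of those defs is itself a load.
//   }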

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
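
// Added note: MRInfo accumulates over all requested operands, so a pure def
// reports VirtRegMap::isMod, a pure use VirtRegMap::isRef, and a tied
// (two-address) use forces VirtRegMap::isModRef while the tied operand is
// deliberately left out of FoldOps, since folding it would fold both the
// read and the write.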

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;
  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    return true;
  }
  return false;
}
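
// A minimal call sketch (added commentary, not part of the original file),
// mirroring how the spill-rewriting code below folds a reload from a stack
// slot directly into a use; `OpIdx` and `Slot` are assumed to be in scope.
//
//   SmallVector<unsigned, 2> Ops;
//   Ops.push_back(OpIdx);               // operand(s) of MI that read li.reg
//   if (tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
//                            Ops, true /*isSS*/, Slot, li.reg)) {
//     // MI now refers to the folded instruction; no separate reload needed.
//   }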

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
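
// Added note: Idx2MBBMap is kept sorted by block start index, so the
// lower_bound above finds the first block boundary at or after I->start; a
// range whose end reaches past that boundary (I->end > II->first) must span
// more than one basic block.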

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be-rematerialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range. 
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      HasUse = false;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the folded instruction
    // to the new register, so that when it is unfolded we get the
    // correct answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }
    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
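
// Added note: kill indices use the same linear instruction numbering as the
// rest of this file (InstrSlots::NUM slots per instruction), so the test
// above asks whether any recorded kill of this val# lies strictly between
// Idx and the end of MBB, e.g. with Idx = 12 and getMBBEndIdx(MBB) = 40, a
// kill at index 24 answers true while one at index 8 does not.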

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
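
// Added note: instruction indices are unique, so after the std::sort in
// rewriteInstructionsForSpills all RewriteInfo entries with the same Index
// describe operands of the same MachineInstr and end up adjacent; the
// rewrite loop below relies on this when it coalesces the entries for one
// instruction into a single HasUse / HasDef summary.
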
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
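
// A minimal sketch of how these two helpers cooperate (added commentary,
// not part of the original file): when the store for a spill can also fold
// the restore at the same index, the now-redundant restore is erased.
//
//   if (alsoFoldARestore(Id, index, VReg, RestoreMBBs, RestoreIdxes)) {
//     // ... fold the memory operand into the instruction ...
//     eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
//   }
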
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;
    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;
      
      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();
      
      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);
      
      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;
      
      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);
        
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }
      
      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }
      
      added.push_back(&nI);
      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)