      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(tri_->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}

//===----------------------------------------------------------------------===//
//                         IntervalUpdate class.
//===----------------------------------------------------------------------===//

/// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex NewIdx;
  typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
  typedef DenseSet<IntRangePair> RangeSet;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI, SlotIndex NewIdx)
    : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}

  // Update intervals for all operands of MI from OldIdx to NewIdx.
  // This assumes that MI used to be at OldIdx, and now resides at
  // NewIdx.
  void moveAllOperandsFrom(MachineInstr* MI, SlotIndex OldIdx) {
    // Collect the operands.
    RangeSet Entering, Internal, Exiting;
    bool hasRegMaskOp = false;
    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);

    moveAllEnteringFrom(OldIdx, Entering);
    moveAllInternalFrom(OldIdx, Internal);
    moveAllExitingFrom(OldIdx, Exiting);
    if (hasRegMaskOp)
      updateRegMaskSlots(OldIdx);

#ifndef NDEBUG
    LIValidator validator;
    std::for_each(Entering.begin(), Entering.end(), validator);
    std::for_each(Internal.begin(), Internal.end(), validator);
    std::for_each(Exiting.begin(), Exiting.end(), validator);
    assert(validator.rangesOk() && "moveOperandsFrom broke liveness.");
#endif
  }

private:

#ifndef NDEBUG
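  // Functor that sanity-checks every LiveInterval touched by a move: any
  // interval whose ranges are out of order or empty is recorded as bogus.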
  class LIValidator {
  private:
    DenseSet<const LiveInterval*> Checked, Bogus;
  public:
    void operator()(const IntRangePair& P) {
      const LiveInterval* LI = P.first;
      if (Checked.count(LI))
        return;
      Checked.insert(LI);
      if (LI->empty())
        return;
      SlotIndex LastEnd = LI->begin()->start;
      for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
           LRI != LRE; ++LRI) {
        const LiveRange& LR = *LRI;
        if (LastEnd > LR.start || LR.start >= LR.end)
          Bogus.insert(LI);
        LastEnd = LR.end;
      }
    }

    bool rangesOk() const {
      return Bogus.empty();
    }
  };
#endif

  // Collect IntRangePairs for all operands of MI that may need fixing.
  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
  // maps).
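  // Ranges are classified by their relationship to OldIdx: "Entering" ranges
  // are live into the instruction (reads), "Internal" ranges both start and
  // end within the instruction's slots (dead or non-escaping early-clobber
  // defs), and "Exiting" ranges are defined here and live past its dead slot.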
  void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
                     RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
    hasRegMaskOp = false;
    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
                                    MOE = MI->operands_end();
         MOI != MOE; ++MOI) {
      const MachineOperand& MO = *MOI;

      if (MO.isRegMask()) {
        hasRegMaskOp = true;
        continue;
      }

      if (!MO.isReg() || MO.getReg() == 0)
        continue;

      unsigned Reg = MO.getReg();

      // TODO: Currently we're skipping uses that are reserved or have no
      // interval, but we're not updating their kills. This should be
      // fixed.
      if (!LIS.hasInterval(Reg) ||
          (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
        continue;

      LiveInterval* LI = &LIS.getInterval(Reg);

      if (MO.readsReg()) {
        LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
        if (LR != 0)
          Entering.insert(std::make_pair(LI, LR));
      }
      if (MO.isDef()) {
        if (MO.isEarlyClobber()) {
          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true));
          assert(LR != 0 && "No EC range?");
          if (LR->end > OldIdx.getDeadSlot())
            Exiting.insert(std::make_pair(LI, LR));
          else
            Internal.insert(std::make_pair(LI, LR));
        } else if (MO.isDead()) {
          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
          assert(LR != 0 && "No dead-def range?");
          Internal.insert(std::make_pair(LI, LR));
        } else {
          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot());
          assert(LR && LR->end > OldIdx.getDeadSlot() &&
                 "Non-dead-def should have live range exiting.");
          Exiting.insert(std::make_pair(LI, LR));
        }
      }
    }
  }
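
  // Move the kill flag for reg from the instruction at OldIdx to the
  // instruction at newKillIdx, if the old instruction carried one.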
  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
    if (!OldKillMI->killsRegister(reg))
      return; // Bail out if we don't have kill flags on the old register.
    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
    assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
    assert(!NewKillMI->killsRegister(reg) && "New kill instr is already a kill.");
    OldKillMI->clearRegisterKills(reg, &TRI);
    NewKillMI->addRegisterKilled(reg, &TRI);
  }
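
  // Retarget the RegMaskSlots entry recorded at OldIdx to NewIdx. The slot
  // list must stay sorted, i.e. the regmask may not be moved across another
  // regmask.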
  void updateRegMaskSlots(SlotIndex OldIdx) {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    assert(*RI == OldIdx && "No RegMask at OldIdx.");
    *RI = NewIdx;
    assert(*prior(RI) < *RI && *RI < *next(RI) &&
           "RegSlots out of order. Did you move one call across another?");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
    SlotIndex LastUse = NewIdx;
    for (MachineRegisterInfo::use_nodbg_iterator
           UI = MRI.use_nodbg_begin(Reg),
           UE = MRI.use_nodbg_end();
         UI != UE; ++UI) {
      const MachineInstr* MI = &*UI;
      SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
      if (InstSlot > LastUse && InstSlot < OldIdx)
        LastUse = InstSlot;
    }
    return LastUse;
  }
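
  // Handle a range that was live into MI when MI moves up (NewIdx < OldIdx).
  // If the range was killed at OldIdx, trim it back to the last remaining use
  // before OldIdx and move the kill flag to that use.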
  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough)
      return;
    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
    if (LastUse != NewIdx)
      moveKillFlags(LI->reg, NewIdx, LastUse);
    LR->end = LastUse.getRegSlot(LR->end.isEarlyClobber());
  }
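
  // Handle a range that was live into MI when MI moves down (NewIdx > OldIdx).
  // Unless the range is live out of the block, its endpoint (and, for ranges
  // that extended past OldIdx, the kill flag) is moved to NewIdx.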
  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough) {
      MachineBasicBlock* MBB = LIS.getInstructionFromIndex(NewIdx)->getParent();
      bool LiveOut = LR->end >= LIS.getSlotIndexes()->getMBBEndIdx(MBB);
      if (!LiveOut) {
        moveKillFlags(LI->reg, LR->end, NewIdx);
        LR->end = NewIdx.getRegSlot(LR->end.isEarlyClobber());
      }
    } else {
      // Not live through. Easy - just update the range endpoint.
      LR->end = NewIdx.getRegSlot(LR->end.isEarlyClobber());
    }
  }
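
  // Dispatch each entering range to the upward or downward handler, depending
  // on which direction the instruction moved.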
  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
    bool GoingUp = NewIdx < OldIdx;

    if (GoingUp) {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringUpFrom(OldIdx, *EI);
    } else {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringDownFrom(OldIdx, *EI);
    }
  }
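
  // Move a range that lies entirely within MI's slots (e.g. a dead def):
  // rebuild it at NewIdx, preserving its early-clobber and dead-slot flags.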
  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           LR->end <= OldIdx.getDeadSlot() &&
           "Range should be internal to OldIdx.");
    LiveRange Tmp(*LR);
    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    Tmp.valno->def = Tmp.start;
    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
    LI->removeRange(*LR);
    LI->addRange(Tmp);
  }

  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II)
      moveInternalFrom(OldIdx, *II);
  }

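  // Move a range that is defined by MI at OldIdx and extends beyond it: shift
  // its start (and its value number's definition point) to NewIdx.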
  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           "Range should start in OldIdx.");
    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    LR->start = NewStart;
    LR->valno->def = NewStart;
  }

  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI)
      moveExitingFrom(OldIdx, *EI);
  }
};

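// Repair live intervals after MI has been moved within its basic block:
// recompute MI's SlotIndex and update the live ranges of all its operands
// from the old index to the new one.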
void LiveIntervals::handleMove(MachineInstr* MI) {
  SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
  indexes_->removeMachineInstrFromMaps(MI);
  SlotIndex NewIndex = MI->isInsideBundle() ?
                        indexes_->getInstructionIndex(MI->getBundleStart()) :
                        indexes_->insertMachineInstrInMaps(MI);
  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
         OldIndex < getMBBEndIdx(MI->getParent()) &&
         "Cannot handle moves across basic block boundaries.");
  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
  HMEditor HME(*this, *mri_, *tri_, NewIndex);
  HME.moveAllOperandsFrom(MI, OldIndex);
}