    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      interval.print(errs(), tri_);
    });

  // Look for kills. If the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  LiveIndex baseIndex = MIIdx;
  LiveIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  LiveIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
      SeenDefUse = true;
      break;
    }

    baseIndex = getNextIndex(baseIndex);
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(MIIdx));
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(LiveIndex(MBB->getNumber()), 0, false,
                          VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);
  
  interval.addRange(LR);
  DEBUG(errs() << " +" << LR << '\n');
bool
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
                                   SmallVector<MachineInstr*,16> &IdentCopies,
                                   SmallVector<MachineInstr*,16> &OtherCopies) {
  bool HaveConflict = false;
  for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
         re = mri_->def_end(); ri != re; ++ri) {
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      continue;
    if (SrcReg != DstInt.reg) {
      OtherCopies.push_back(MI);
      HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  if (!HaveConflict)
    return false; // Let coalescer handle it
  return IdentCopies.size() > OtherCopies.size();
}
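
// Illustrative note (not from the original source): coalescing is reported
// profitable only when a conflict exists and identity copies outnumber the
// conflicting ones, e.g. two r1024 = r1024-style copies against one
// conflicting copy yields 2 > 1, so early coalescing proceeds.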

void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
#ifndef NDEBUG
    assert(isMove && "PHI join instruction must be a move!");
#else
    isMove = isMove;
#endif

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    VNInfo *VNI = DstInt.getValNumInfo(0);

    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      DEBUG(errs() << "Moving: " << *PHICopy);

      LiveIndex MIIndex = getInstructionIndex(PHICopy);
      LiveIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      LiveIndex StartIndex = SLR->start;
      LiveIndex EndIndex = SLR->end;

      // Delete the val# defined by the copy being redirected and add its
      // range (from the beginning of the mbb to the end of the range) to the
      // phi destination's interval.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      if (DstInt.liveAt(StartIndex))
        DstInt.removeRange(StartIndex, EndIndex);
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
                                           VNInfoAllocator);
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
          continue;
        MO.setReg(PHIDst);
      }
    }

    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      LiveIndex MIIndex = getInstructionIndex(PHICopy);
      LiveIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      LiveIndex StartIndex = SLR->start;
      LiveIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    }

    // Remove the phi join and update the phi block liveness.
    LiveIndex MIIndex = getInstructionIndex(Join);
    LiveIndex UseIndex = getUseIndex(MIIndex);
    LiveIndex DefIndex = getDefIndex(MIIndex);
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}
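
// Illustrative sketch (not from the original source): for a phi join copy
//   %reg1028 = %reg1026
// the loops above redirect conflicting copies of %reg1026 to define %reg1028
// directly, erase the would-be identity copies, fold their ranges into
// %reg1028's interval, and finally delete the join once %reg1026 is empty.
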
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    LiveIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--)
        MIIndex = getNextIndex(MIIndex);

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
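
// Illustrative example (not from the original source): given the numbering
// above, a virtual register defined at index 4 and last read at index 12
// receives the half-open interval [4, 13); a register that is redefined
// before ever being read gets the one-slot interval
// [defSlot(def), defSlot(def)+1) described in handleLiveInRegister.
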
bool LiveIntervals::findLiveInMBBs(
                              LiveIndex Start, LiveIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
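
// Usage sketch (hypothetical caller, not part of this file):
//   SmallVector<MachineBasicBlock*, 4> LiveInMBBs;
//   if (lis->findLiveInMBBs(LR.start, LR.end, LiveInMBBs)) {
//     // Every block in LiveInMBBs begins inside [LR.start, LR.end).
//   }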

bool LiveIntervals::findReachableMBBs(
                              LiveIndex Start, LiveIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
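
// Illustrative example (not from the original source): on x86 in PIC mode a
// rematerializable constant-pool load can implicitly use the PIC base vreg:
//   %reg1030 = MOV32rm %reg1024, ..., <cp#0>   ; %reg1024 = PIC base
// getReMatImplicitUse returns %reg1024 so callers can check that it is still
// available wherever the load would be re-materialized.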

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       LiveIndex UseIdx) const {
  LiveIndex Index = getInstructionIndex(MI);  
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      LiveIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
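
// Note (editorial, not from the original source): a val# without an accurate
// def index (e.g. a PHI-def) has no single defining instruction that could be
// re-executed, so the interval as a whole is rejected for rematerialization.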

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  return false;
}
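
// Illustrative note (not from the original source): MRInfo accumulates
// VirtRegMap mod/ref bits for the operands being folded. For a two-address
// instruction such as
//   %reg1024 = ADD32rr %reg1024<tied>, %reg1025
// the tied use is marked isModRef and excluded from FoldOps, since folding
// the use would also have to fold the def.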
                           

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg, or any rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         LiveIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;
  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    MI = MBB.insert(MBB.erase(MI), fmi);
    return true;
  }
  return false;
}
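
// Illustrative example (not from the original source): folding a reload into
// its use on x86 turns
//   %reg1024 = MOV32rm <fi#2>              ; reload from stack slot 2
//   %reg1025 = ADD32rr %reg1026, %reg1024
// into the single memory-operand form
//   %reg1025 = ADD32rm %reg1026, <fi#2>
// after which the maps above are updated to point at the new instruction.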

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;
  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
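
// Note (editorial, not from the original source): Idx2MBBMap is sorted by
// index, so each range start is located with lower_bound; if a range crosses
// the matched block's boundary, or two ranges land in different blocks, the
// interval spans more than one MBB and the function returns false.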

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, LiveIndex index, LiveIndex end, 
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range. 
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          // Folding can delete or replace MI; reset state and rescan the
          // (possibly new) instruction from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for the other operands that read the same register.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
                     nI.getNextValue(LiveIndex(), 0, false,
                                     VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        LiveIndex End = getNextSlot(getUseIndex(index));
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(LiveIndex(), 0, false,
                                   VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }
    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   LiveIndex Idx) const {
  LiveIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIIndex())
      continue;

    LiveIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
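
// Note (editorial, not from the original source): kills recorded at PHI
// indices belong to control-flow edges rather than real instructions, so
// they are skipped; only kills strictly between Idx and the block's end
// index count as "a kill in this MBB after Idx".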

namespace {
  /// RewriteInfo - Keep track of machine instrs that will be rewritten
  /// during spilling.
  struct RewriteInfo {
    LiveIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
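
// Note (editorial, not from the original source): RewriteInfo records one
// entry per (instruction, operand) occurrence of the spilled register;
// sorting with RewriteInfoCompare lets rewriteInstructionsForSpills visit
// the defs and uses in program order and merge the duplicate entries that a
// single instruction contributes.
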
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  LiveIndex start = getBaseIndex(I->start);
  LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    LiveIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    LiveIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));