  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx) + 1;
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);
  
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() { 
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
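      // Each instruction is allotted one group of InstrSlots::NUM indices per
      // def (minimum one), so instructions with multiple defs span multiple
      // index groups.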
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      
      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

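/// findLiveInMBBs - Collect the basic blocks whose start index falls within
/// the range [Start, End). Returns true if any were found.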
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

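/// findReachableMBBs - Collect the successors of every basic block that is
/// completely contained in the range [Start, End). Returns true if any were
/// found.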
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

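/// createInterval - Allocate a new LiveInterval for the given register. A
/// physical register gets an infinite spill weight so it is never selected
/// for spilling.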
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;

  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);  
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold this into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;
    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
                           

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;
  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;
  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

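/// intervalIsInOneMBB - Returns true if the specified interval is entirely
/// within a single basic block.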
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range. 
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }
    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
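/// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
/// VNInfo after the given index and within the given basic block.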
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = !TrySplit;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

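/// alsoFoldARestore - Returns true if the restore of register vr at the given
/// index in basic block Id has been marked as foldable.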
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

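/// eraseRestoreInfo - Mark the restore of register vr at the given index in
/// basic block Id as erased by invalidating its index.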
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {