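  // Intervals for which a kill has already been recorded at their spill
  // point; the finalization loop below will not add another kill for these.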
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

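  // Process restore points: fold reloads or re-materialized defs into their
  // uses where possible; otherwise ask the spiller to issue them.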
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the register
              // interval's spill weight to HUGE_VALF to prevent it from being
              // spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable. 
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}

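/// addLiveRangeToEndOfBlock - Create a new value for the given register,
/// defined at startInst, and extend its live range to the end of the
/// basic block containing startInst.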
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                   MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);
  
  return LR;
}