//===- ARMRegisterInfo.cpp - ARM Register Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the "Instituto Nokia de Tecnologia" and
// is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of the MRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> ThumbRegScavenging("enable-thumb-reg-scavenging",
cl::Hidden,
cl::desc("Enable register scavenging on Thumb"));
unsigned ARMRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
using namespace ARM;
switch (RegEnum) {
case R0: case S0: case D0: return 0;
case R1: case S1: case D1: return 1;
case R2: case S2: case D2: return 2;
case R3: case S3: case D3: return 3;
case R4: case S4: case D4: return 4;
case R5: case S5: case D5: return 5;
case R6: case S6: case D6: return 6;
case R7: case S7: case D7: return 7;
case R8: case S8: case D8: return 8;
case R9: case S9: case D9: return 9;
case R10: case S10: case D10: return 10;
case R11: case S11: case D11: return 11;
case R12: case S12: case D12: return 12;
case SP: case S13: case D13: return 13;
case LR: case S14: case D14: return 14;
case PC: case S15: case D15: return 15;
case S16: return 16;
case S17: return 17;
case S18: return 18;
case S19: return 19;
case S20: return 20;
case S21: return 21;
case S22: return 22;
case S23: return 23;
case S24: return 24;
case S25: return 25;
case S26: return 26;
case S27: return 27;
case S28: return 28;
case S29: return 29;
case S30: return 30;
case S31: return 31;
  default:
    assert(0 && "Unknown ARM register!");
    return 0;
  }
}
ARMRegisterInfo::ARMRegisterInfo(const TargetInstrInfo &tii,
const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    TII(tii), STI(sti),
    FramePtr((STI.useThumbBacktraces() || STI.isThumb()) ? ARM::R7 : ARM::R11) {
}
bool ARMRegisterInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (!AFI->isThumbFunction() || CSI.empty())
return false;
MachineInstrBuilder MIB = BuildMI(MBB, MI, TII.get(ARM::tPUSH));
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
MIB.addReg(Reg, false/*isDef*/,false/*isImp*/,true/*isKill*/);
}
return true;
}
bool ARMRegisterInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (!AFI->isThumbFunction() || CSI.empty())
return false;
bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
MachineInstr *PopMI = new MachineInstr(TII.get(ARM::tPOP));
MBB.insert(MI, PopMI);
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
if (Reg == ARM::LR) {
// Special epilogue for vararg functions. See emitEpilogue
if (isVarArg)
continue;
Reg = ARM::PC;
PopMI->setInstrDescriptor(TII.get(ARM::tPOP_RET));
MBB.erase(MI);
}
PopMI->addRegOperand(Reg, true);
}
return true;
}
void ARMRegisterInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, int FI,
const TargetRegisterClass *RC) const {
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, TII.get(ARM::tSpill)).addReg(SrcReg, false, false, true)
.addFrameIndex(FI).addImm(0);
else
BuildMI(MBB, I, TII.get(ARM::STR)).addReg(SrcReg, false, false, true)
.addFrameIndex(FI).addReg(0).addImm(0).addImm((int64_t)ARMCC::AL)
.addReg(0);
} else if (RC == ARM::DPRRegisterClass) {
BuildMI(MBB, I, TII.get(ARM::FSTD)).addReg(SrcReg, false, false, true)
.addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL).addReg(0);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
BuildMI(MBB, I, TII.get(ARM::FSTS)).addReg(SrcReg, false, false, true)
.addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL).addReg(0);
  }
}
void ARMRegisterInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC) const {
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, TII.get(ARM::tRestore), DestReg)
.addFrameIndex(FI).addImm(0);
else
BuildMI(MBB, I, TII.get(ARM::LDR), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0).addImm((int64_t)ARMCC::AL)
.addReg(0);
} else if (RC == ARM::DPRRegisterClass) {
BuildMI(MBB, I, TII.get(ARM::FLDD), DestReg)
.addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL).addReg(0);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
BuildMI(MBB, I, TII.get(ARM::FLDS), DestReg)
.addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL).addReg(0);
  }
}
void ARMRegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *RC) const {
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, TII.get(ARM::tMOVr), DestReg).addReg(SrcReg);
else
BuildMI(MBB, I, TII.get(ARM::MOVr), DestReg).addReg(SrcReg)
.addImm((int64_t)ARMCC::AL).addReg(0).addReg(0);
} else if (RC == ARM::SPRRegisterClass)
BuildMI(MBB, I, TII.get(ARM::FCPYS), DestReg).addReg(SrcReg)
      .addImm((int64_t)ARMCC::AL).addReg(0);
  else {
    assert(RC == ARM::DPRRegisterClass && "Unknown regclass!");
    BuildMI(MBB, I, TII.get(ARM::FCPYD), DestReg).addReg(SrcReg)
      .addImm((int64_t)ARMCC::AL).addReg(0);
  }
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
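/// For example, materializing 0xdeadbeef into r0 in ARM mode creates an i32
/// constant-pool entry for the value and emits a PC-relative constant-pool
/// load of that entry (LDRcp) into r0; the Thumb path uses tLDRcp instead.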
static void emitLoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, int Val,
ARMCC::CondCodes Pred, unsigned PredReg,
const TargetInstrInfo &TII, bool isThumb) {
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C = ConstantInt::get(Type::Int32Ty, Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 2);
if (isThumb)
BuildMI(MBB, MBBI, TII.get(ARM::tLDRcp), DestReg).addConstantPoolIndex(Idx);
else
BuildMI(MBB, MBBI, TII.get(ARM::LDRcp), DestReg).addConstantPoolIndex(Idx)
.addReg(0).addImm(0).addImm((unsigned)Pred).addReg(PredReg);
}
void ARMRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
if (Orig->getOpcode() == ARM::MOVi2pieces) {
emitLoadConstPool(MBB, I, DestReg,
Orig->getOperand(1).getImmedValue(),
(ARMCC::CondCodes)Orig->getOperand(2).getImmedValue(),
Orig->getOperand(3).getReg(),
TII, false);
return;
}
MachineInstr *MI = Orig->clone();
MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
}
/// isLowRegister - Returns true if the register is low register r0-r7.
///
static bool isLowRegister(unsigned Reg) {
using namespace ARM;
switch (Reg) {
case R0: case R1: case R2: case R3:
case R4: case R5: case R6: case R7:
return true;
default:
return false;
}
}
MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
unsigned OpNum, int FI) const {
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch (Opc) {
  default: break;
  case ARM::MOVr: {
    if (MI->getOperand(4).getReg() == ARM::CPSR)
      // If it is updating CPSR, then it cannot be folded.
      break;
unsigned Pred = MI->getOperand(2).getImmedValue();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
.addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
        .addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  case ARM::tMOVr: {
    if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
break;
NewMI = BuildMI(TII.get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
// tRestore cannot target a high register operand.
break;
NewMI = BuildMI(TII.get(ARM::tRestore), DstReg).addFrameIndex(FI)
.addImm(0);
}
break;
}
case ARM::FCPYS: {
unsigned Pred = MI->getOperand(2).getImmedValue();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::FCPYD: {
unsigned Pred = MI->getOperand(2).getImmedValue();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}

const unsigned*
ARMRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
ARM::R7, ARM::R6, ARM::R5, ARM::R4,
ARM::D15, ARM::D14, ARM::D13, ARM::D12,
ARM::D11, ARM::D10, ARM::D9, ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
ARM::R11, ARM::R10, ARM::R9, ARM::R8,
ARM::D15, ARM::D14, ARM::D13, ARM::D12,
ARM::D11, ARM::D10, ARM::D9, ARM::D8,
0
};
return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}
const TargetRegisterClass* const *
ARMRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
&ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
&ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
&ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
&ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
&ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
    0
  };
  return CalleeSavedRegClasses;
}
BitVector ARMRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  // FIXME: avoid re-calculating this every time.
BitVector Reserved(getNumRegs());
Reserved.set(ARM::SP);
if (STI.isTargetDarwin() || hasFP(MF))
Reserved.set(FramePtr);
// Some targets reserve R9.
if (STI.isR9Reserved())
Reserved.set(ARM::R9);
return Reserved;
}
bool
ARMRegisterInfo::isReservedReg(const MachineFunction &MF, unsigned Reg) const {
switch (Reg) {
default: break;
case ARM::SP:
case ARM::PC:
return true;
case ARM::R7:
case ARM::R11:
if (FramePtr == Reg && (STI.isTargetDarwin() || hasFP(MF)))
return true;
break;
case ARM::R9:
return STI.isR9Reserved();
}
return false;
}
bool
ARMRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  return ThumbRegScavenging || !AFI->isThumbFunction();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
///
bool ARMRegisterInfo::hasFP(const MachineFunction &MF) const {
return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects();
}
// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
// not required, we reserve argument space for call sites in the function
// immediately on entry to the current function. This eliminates the need for
// add/sub sp brackets around call sites. Returns true if the call frame is
// included as part of the stack frame.
bool ARMRegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
const MachineFrameInfo *FFI = MF.getFrameInfo();
unsigned CFSize = FFI->getMaxCallFrameSize();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// It's not always a good idea to include the call frame as part of the
  // stack frame. ARM (especially Thumb) has a small immediate offset range for
  // addressing the stack frame, so a large call frame can cause poor codegen
  // and may even make it impossible to scavenge a register.
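  // For example: Thumb sp-relative loads / stores take an 8-bit immediate
  // scaled by 4, so offsets up to 1020 bytes are reachable; the call frame is
  // only folded in if it stays under half of that (510 bytes). ARM LDR / STR
  // use a 12-bit immediate (up to 4095), giving a threshold of roughly 2047.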
if (AFI->isThumbFunction()) {
if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
return false;
} else {
if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
return false;
}
return !MF.getFrameInfo()->hasVarSizedObjects();
}
/// emitARMRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in ARM code.
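/// For example (illustrative), NumBytes = 1028 cannot be encoded as a single
/// ARM shifter-operand immediate, so the loop below emits one ADD per
/// encodable chunk (here #4 and #1024).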
static
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const TargetInstrInfo &TII) {
bool isSub = NumBytes < 0;
if (isSub) NumBytes = -NumBytes;
while (NumBytes) {
unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
assert(ThisVal && "Didn't extract field correctly");
// We will handle these bits from offset, clear them.
NumBytes &= ~ThisVal;
// Get the properly encoded SOImmVal field.
int SOImmVal = ARM_AM::getSOImmVal(ThisVal);
assert(SOImmVal != -1 && "Bit extraction didn't work?");
// Build the new ADD / SUB.
BuildMI(MBB, MBBI, TII.get(isSub ? ARM::SUBri : ARM::ADDri), DestReg)
.addReg(BaseReg, false, false, true).addImm(SOImmVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}
/// calcNumMI - Returns the number of instructions required to materialize
/// the specific add / sub r, c instruction.
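/// For example (illustrative): adding 1100 to sp via tADDrSPi (NumBits = 8,
/// Scale = 4) covers 1020 bytes with the first instruction, leaving 80 bytes
/// for one tADDi8, so calcNumMI returns 2 (plus one more if ExtraOpc is set).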
static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
unsigned NumBits, unsigned Scale) {
unsigned NumMIs = 0;
unsigned Chunk = ((1 << NumBits) - 1) * Scale;
if (Opc == ARM::tADDrSPi) {
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
Bytes -= ThisVal;
NumMIs++;
NumBits = 8;
Scale = 1; // Followed by a number of tADDi8.
Chunk = ((1 << NumBits) - 1) * Scale;
}
NumMIs += Bytes / Chunk;
if ((Bytes % Chunk) != 0)
NumMIs++;
if (ExtraOpc)
NumMIs++;
return NumMIs;
}
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
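/// For example (illustrative), with DestReg = BaseReg = sp and
/// NumBytes = -1000: r3 is saved to r12, -1000 is loaded into r3 from a
/// constpool entry, sp is updated with an add of r3 (tADDhirr), and r3 is
/// then restored from r12.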
static
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              unsigned DestReg, unsigned BaseReg,
int NumBytes, bool CanChangeCC,
const TargetInstrInfo &TII) {
bool isHigh = !isLowRegister(DestReg) ||
(BaseReg != 0 && !isLowRegister(BaseReg));
bool isSub = false;
  // Subtract doesn't have a high register version. Load the negative value
  // if either the base or dest register is a high register. Also, do not
  // issue the sub as part of the sequence if the condition register is to be
  // preserved.
if (NumBytes < 0 && !isHigh && CanChangeCC) {
isSub = true;
NumBytes = -NumBytes;
}
unsigned LdReg = DestReg;
if (DestReg == ARM::SP) {
assert(BaseReg == ARM::SP && "Unexpected!");
LdReg = ARM::R3;
BuildMI(MBB, MBBI, TII.get(ARM::tMOVr), ARM::R12)
.addReg(ARM::R3, false, false, true);
}
if (NumBytes <= 255 && NumBytes >= 0)
BuildMI(MBB, MBBI, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
else if (NumBytes < 0 && NumBytes >= -255) {
BuildMI(MBB, MBBI, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
BuildMI(MBB, MBBI, TII.get(ARM::tNEG), LdReg)
.addReg(LdReg, false, false, true);
} else
emitLoadConstPool(MBB, MBBI, LdReg, NumBytes, ARMCC::AL, 0, TII, true);
// Emit add / sub.
int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
const MachineInstrBuilder MIB = BuildMI(MBB, MBBI, TII.get(Opc), DestReg);
if (DestReg == ARM::SP || isSub)
MIB.addReg(BaseReg).addReg(LdReg, false, false, true);
else
MIB.addReg(LdReg).addReg(BaseReg, false, false, true);
if (DestReg == ARM::SP)
BuildMI(MBB, MBBI, TII.get(ARM::tMOVr), ARM::R3)
.addReg(ARM::R12, false, false, true);
}
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code.
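/// For example (illustrative), "r1 = sp + 403" becomes "add r1, sp, #400"
/// (tADDrSPi, scaled by 4) followed by "add r1, r1, #3" (tADDi3), mirroring
/// the comment in the BaseReg == SP case below.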
static
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
int NumBytes, const TargetInstrInfo &TII) {
bool isSub = NumBytes < 0;
unsigned Bytes = (unsigned)NumBytes;
if (isSub) Bytes = -NumBytes;
bool isMul4 = (Bytes & 3) == 0;
  bool isTwoAddr = false;
  bool DstNotEqBase = false;
  unsigned NumBits = 1;
  unsigned Scale = 1;
  int Opc = 0;
  int ExtraOpc = 0;
if (DestReg == BaseReg && BaseReg == ARM::SP) {
assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
    NumBits = 7;
    Scale = 4;
    Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
isTwoAddr = true;
} else if (!isSub && BaseReg == ARM::SP) {
// r1 = add sp, 403
// =>
// r1 = add sp, 100 * 4
// r1 = add r1, 3
if (!isMul4) {
Bytes &= ~3;
ExtraOpc = ARM::tADDi3;
}
    NumBits = 8;
    Scale = 4;
    Opc = ARM::tADDrSPi;
  } else {
    // sp = sub sp, c
// r1 = sub sp, c
// r8 = sub sp, c
if (DestReg != BaseReg)
DstNotEqBase = true;
NumBits = 8;
Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
isTwoAddr = true;
}
unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
if (NumMIs > Threshold) {
// This will expand into too many instructions. Load the immediate from a
// constpool entry.
emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII);
return;
}
if (DstNotEqBase) {
if (isLowRegister(DestReg) && isLowRegister(BaseReg)) {
// If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
unsigned Chunk = (1 << 3) - 1;
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
Bytes -= ThisVal;
BuildMI(MBB, MBBI, TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3), DestReg)
.addReg(BaseReg, false, false, true).addImm(ThisVal);
} else {
BuildMI(MBB, MBBI, TII.get(ARM::tMOVr), DestReg)
.addReg(BaseReg, false, false, true);
}
BaseReg = DestReg;
}
unsigned Chunk = ((1 << NumBits) - 1) * Scale;
while (Bytes) {
    unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
    Bytes -= ThisVal;
    ThisVal /= Scale;
    // Build the new tADD / tSUB.
    if (isTwoAddr)
      BuildMI(MBB, MBBI, TII.get(Opc), DestReg).addReg(DestReg).addImm(ThisVal);
    else {
      bool isKill = BaseReg != ARM::SP;
BuildMI(MBB, MBBI, TII.get(Opc), DestReg)
.addReg(BaseReg, false, false, isKill).addImm(ThisVal);
BaseReg = DestReg;
if (Opc == ARM::tADDrSPi) {
// r4 = add sp, imm
// r4 = add r4, imm
// ...
NumBits = 8;
Scale = 1;
Chunk = ((1 << NumBits) - 1) * Scale;
Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
isTwoAddr = true;
}
}
}
if (ExtraOpc)
BuildMI(MBB, MBBI, TII.get(ExtraOpc), DestReg)
.addReg(DestReg, false, false, true)
.addImm(((unsigned)NumBytes) & 3);
}
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
int NumBytes, ARMCC::CondCodes Pred, unsigned PredReg,
bool isThumb, const TargetInstrInfo &TII) {
if (isThumb)
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
else
emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
}
void ARMRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
if (!hasReservedCallFrame(MF)) {
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
MachineInstr *Old = I;
unsigned Amount = Old->getOperand(0).getImmedValue();
if (Amount != 0) {
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
// Replace the pseudo instruction with a new instruction...
unsigned Opc = Old->getOpcode();
bool isThumb = AFI->isThumbFunction();
ARMCC::CondCodes Pred = isThumb
? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(1).getImmedValue();
unsigned PredReg = isThumb ? 0 : Old->getOperand(2).getReg();
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
emitSPUpdate(MBB, I, -Amount, Pred, PredReg, isThumb, TII);
} else {
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
emitSPUpdate(MBB, I, Amount, Pred, PredReg, isThumb, TII);
}
    }
  }
  MBB.erase(I);
}
/// emitThumbConstant - Emit a series of instructions to materialize a
/// constant.
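/// For example (illustrative), Imm = -300 is emitted as "mov Rd, #255",
/// "add Rd, Rd, #45", then "neg Rd, Rd".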
static void emitThumbConstant(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, int Imm,
const TargetInstrInfo &TII) {
bool isSub = Imm < 0;
if (isSub) Imm = -Imm;
int Chunk = (1 << 8) - 1;
int ThisVal = (Imm > Chunk) ? Chunk : Imm;
Imm -= ThisVal;
BuildMI(MBB, MBBI, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
if (Imm > 0)
emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII);
if (isSub)
BuildMI(MBB, MBBI, TII.get(ARM::tNEG), DestReg)
      .addReg(DestReg, false, false, true);
}
/// findScratchRegister - Find a 'free' ARM register. If register scavenger
/// is not being used, R12 is available. Otherwise, try for a call-clobbered
/// register first and then a spilled callee-saved register if that fails.
static
unsigned findScratchRegister(RegScavenger *RS, const TargetRegisterClass *RC,
ARMFunctionInfo *AFI) {
unsigned Reg = RS ? RS->FindUnusedReg(RC, true) : (unsigned) ARM::R12;
if (Reg == 0)
    // Try an already spilled CS register.
Reg = RS->FindUnusedReg(RC, AFI->getSpilledCSRegisters());
return Reg;
}
void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
bool isThumb = AFI->isThumbFunction();
while (!MI.getOperand(i).isFrameIndex()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
}
unsigned FrameReg = ARM::SP;
int FrameIndex = MI.getOperand(i).getFrameIndex();
  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
               MF.getFrameInfo()->getStackSize() + SPAdj;
  if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
Offset -= AFI->getGPRCalleeSavedArea1Offset();
else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
Offset -= AFI->getGPRCalleeSavedArea2Offset();
else if (AFI->isDPRCalleeSavedAreaFrame(FrameIndex))
Offset -= AFI->getDPRCalleeSavedAreaOffset();
  else if (hasFP(MF)) {
    // There are alloca()'s in this function; must reference off the frame
    // pointer instead.
    FrameReg = getFrameRegister(MF);
Offset -= AFI->getFramePtrSpillOffset();
}
unsigned Opcode = MI.getOpcode();
const TargetInstrDescriptor &Desc = TII.get(Opcode);
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
bool isSub = false;
if (Opcode == ARM::ADDri) {
Offset += MI.getOperand(i+1).getImm();
if (Offset == 0) {
// Turn it into a move.
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.RemoveOperand(i+1);
return;
} else if (Offset < 0) {
Offset = -Offset;
isSub = true;
MI.setInstrDescriptor(TII.get(ARM::SUBri));
}
// Common case: small offset, fits into instruction.
int ImmedOffset = ARM_AM::getSOImmVal(Offset);
if (ImmedOffset != -1) {
// Replace the FrameIndex with sp / fp
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.getOperand(i+1).ChangeToImmediate(ImmedOffset);
return;
}
// Otherwise, we fallback to common code below to form the imm offset with
// a sequence of ADDri instructions. First though, pull as much of the imm
// into this ADDri as possible.
unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
// We will handle these bits from offset, clear them.
Offset &= ~ThisImmVal;
// Get the properly encoded SOImmVal field.
int ThisSOImmVal = ARM_AM::getSOImmVal(ThisImmVal);
assert(ThisSOImmVal != -1 && "Bit extraction didn't work?");
MI.getOperand(i+1).ChangeToImmediate(ThisSOImmVal);
} else if (Opcode == ARM::tADDrSPi) {
Offset += MI.getOperand(i+1).getImm();
// Can't use tADDrSPi if it's based off the frame pointer.
unsigned NumBits = 0;
unsigned Scale = 1;
if (FrameReg != ARM::SP) {
Opcode = ARM::tADDi3;
MI.setInstrDescriptor(TII.get(ARM::tADDi3));
NumBits = 3;
} else {
NumBits = 8;
Scale = 4;
assert((Offset & 3) == 0 &&
"Thumb add/sub sp, #imm immediate must be multiple of 4!");
}
if (Offset == 0) {
// Turn it into a move.
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.RemoveOperand(i+1);
return;
}
// Common case: small offset, fits into instruction.
unsigned Mask = (1 << NumBits) - 1;
if (((Offset / Scale) & ~Mask) == 0) {
// Replace the FrameIndex with sp / fp
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.getOperand(i+1).ChangeToImmediate(Offset / Scale);
return;
}
unsigned DestReg = MI.getOperand(0).getReg();
unsigned Bytes = (Offset > 0) ? Offset : -Offset;
unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, NumBits, Scale);
// MI would expand into a large number of instructions. Don't try to
// simplify the immediate.
if (NumMIs > 2) {
emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII);
MBB.erase(II);
return;
}
if (Offset > 0) {
// Translate r0 = add sp, imm to
// r0 = add sp, 255*4
// r0 = add r0, (imm - 255*4)
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.getOperand(i+1).ChangeToImmediate(Mask);
Offset = (Offset - Mask * Scale);
MachineBasicBlock::iterator NII = next(II);
emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII);
} else {
// Translate r0 = add sp, -imm to
      // r0 = -imm (this is then translated into a series of instructions)
// r0 = add r0, sp
emitThumbConstant(MBB, II, DestReg, Offset, TII);
MI.setInstrDescriptor(TII.get(ARM::tADDhirr));
MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
}
return;
} else {
unsigned ImmIdx = 0;
int InstrOffs = 0;
unsigned NumBits = 0;
unsigned Scale = 1;
switch (AddrMode) {
case ARMII::AddrMode2: {
ImmIdx = i+2;
InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
InstrOffs *= -1;
NumBits = 12;
break;
}
case ARMII::AddrMode3: {
ImmIdx = i+2;
InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
InstrOffs *= -1;
NumBits = 8;
break;
}
case ARMII::AddrMode5: {
ImmIdx = i+1;
InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
InstrOffs *= -1;
NumBits = 8;
Scale = 4;
break;
}
case ARMII::AddrModeTs: {
ImmIdx = i+1;
InstrOffs = MI.getOperand(ImmIdx).getImm();
NumBits = (FrameReg == ARM::SP) ? 8 : 5;
      Scale = 4;
      break;
    }
    default:
      assert(0 && "Unsupported addressing mode!");
      abort();
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
if (Offset < 0 && !isThumb) {
Offset = -Offset;
isSub = true;
}
// Common case: small offset, fits into instruction.
MachineOperand &ImmOp = MI.getOperand(ImmIdx);
int ImmedOffset = Offset / Scale;
unsigned Mask = (1 << NumBits) - 1;
if ((unsigned)Offset <= Mask * Scale) {
// Replace the FrameIndex with sp
MI.getOperand(i).ChangeToRegister(FrameReg, false);
if (isSub)
ImmedOffset |= 1 << NumBits;
ImmOp.ChangeToImmediate(ImmedOffset);
return;
}
    bool isThumbSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
if (AddrMode == ARMII::AddrModeTs) {
// Thumb tLDRspi, tSTRspi. These will change to instructions that use
// a different base register.
NumBits = 5;
Mask = (1 << NumBits) - 1;
}
// If this is a thumb spill / restore, we will be using a constpool load to
// materialize the offset.
    if (AddrMode == ARMII::AddrModeTs && isThumbSpillRestore)
ImmOp.ChangeToImmediate(0);
else {
// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
ImmedOffset = ImmedOffset & Mask;
if (isSub)
ImmedOffset |= 1 << NumBits;
ImmOp.ChangeToImmediate(ImmedOffset);
Offset &= ~(Mask*Scale);
}
}
// If we get here, the immediate doesn't fit into the instruction. We folded
// as much as possible above, handle the rest, providing a register that is
// SP+LargeImm.
assert(Offset && "This code isn't needed if offset already handled!");
if (isThumb) {
if (TII.isLoad(Opcode)) {
// Use the destination register to materialize sp + offset.
unsigned TmpReg = MI.getOperand(0).getReg();
bool UseRR = false;
if (Opcode == ARM::tRestore) {
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
MI.setInstrDescriptor(TII.get(ARM::tLDR));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR)
MI.addRegOperand(FrameReg, false); // Use [reg, reg] addrmode.
else
MI.addRegOperand(0, false); // tLDR has an extra register operand.
} else if (TII.isStore(Opcode)) {
// FIXME! This is horrific!!! We need register scavenging.
// Our temporary workaround has marked r3 unavailable. Of course, r3 is
      // also an ABI register, so it's possible that it is the register being
      // stored here. If that's the case, we do the following:
// r12 = r2
// Use r2 to materialize sp + offset
// str r3, r2
      unsigned ValReg = MI.getOperand(0).getReg();
      unsigned TmpReg = ARM::R3;
      bool UseRR = false;
      if (ValReg == ARM::R3) {
        BuildMI(MBB, II, TII.get(ARM::tMOVr), ARM::R12)
          .addReg(ARM::R2, false, false, true);
        TmpReg = ARM::R2;
      }
if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
BuildMI(MBB, II, TII.get(ARM::tMOVr), ARM::R12)
.addReg(ARM::R3, false, false, true);
if (Opcode == ARM::tSpill) {
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
MI.setInstrDescriptor(TII.get(ARM::tSTR));