// Find the flag operand corresponding to UseOpIdx
unsigned FlagIdx, NumOps=0;
for (FlagIdx = InlineAsm::MIOp_FirstOperand;
FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
const MachineOperand &UFMO = getOperand(FlagIdx);
// After the normal asm operands there may be additional imp-def regs.
if (!UFMO.isImm())
return false;
NumOps = InlineAsm::getNumOperandRegisters(UFMO.getImm());
assert(NumOps < getNumOperands() && "Invalid inline asm flag");
if (UseOpIdx < FlagIdx+NumOps+1)
break;
}
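// If the walk landed exactly on UseOpIdx, the operand is a flag word itself
// rather than a register inside a group, so it cannot be tied to a def.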
if (FlagIdx >= UseOpIdx)
return false;
const MachineOperand &UFMO = getOperand(FlagIdx);
unsigned DefNo;
if (InlineAsm::isUseOperandTiedToDef(UFMO.getImm(), DefNo)) {
if (!DefOpIdx)
return true;
unsigned DefIdx = InlineAsm::MIOp_FirstOperand;
// Remember to adjust the index. The first operand is the asm string, the
// second is the HasSideEffects and AlignStack bits, and then there is a
// flag word for each operand group.
while (DefNo) {
const MachineOperand &FMO = getOperand(DefIdx);
assert(FMO.isImm());
// Skip over this def.
DefIdx += InlineAsm::getNumOperandRegisters(FMO.getImm()) + 1;
--DefNo;
}
*DefOpIdx = DefIdx + UseOpIdx - FlagIdx;
return true;
}
return false;
}
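// Not inline asm: tied operands are recorded in the operand info table via
// the TIED_TO constraint.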
const TargetInstrDesc &TID = getDesc();
if (UseOpIdx >= TID.getNumOperands())
return false;
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse())
return false;
int DefIdx = TID.getOperandConstraint(UseOpIdx, TOI::TIED_TO);
if (DefIdx == -1)
return false;
if (DefOpIdx)
*DefOpIdx = (unsigned)DefIdx;
return true;
}
/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (MO.isReg() && MO.isUse())
MO.setIsKill(false);
}
}
/// copyKillDeadInfo - Copies kill / dead operand properties from MI.
///
void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || (!MO.isKill() && !MO.isDead()))
continue;
for (unsigned j = 0, ee = getNumOperands(); j != ee; ++j) {
MachineOperand &MOp = getOperand(j);
if (!MOp.isIdenticalTo(MO))
continue;
if (MO.isKill())
MOp.setIsKill();
else
MOp.setIsDead();
break;
}
}
}
/// copyPredicates - Copies predicate operand(s) from MI.
void MachineInstr::copyPredicates(const MachineInstr *MI) {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isPredicable())
return;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (TID.OpInfo[i].isPredicate()) {
// Predicated operands must be last operands.
addOperand(MI->getOperand(i));
}
}
}
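/// substituteRegister - Replace all occurrences of FromReg with ToReg:SubIdx,
/// properly composing subreg indices where needed.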
void MachineInstr::substituteRegister(unsigned FromReg,
unsigned ToReg,
unsigned SubIdx,
const TargetRegisterInfo &RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
if (SubIdx)
ToReg = RegInfo.getSubReg(ToReg, SubIdx);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substPhysReg(ToReg, RegInfo);
}
} else {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substVirtReg(ToReg, SubIdx, RegInfo);
}
}
}
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
AliasAnalysis *AA,
bool &SawStore) const {
// Ignore stuff that we obviously can't move.
if (TID->mayStore() || TID->isCall()) {
SawStore = true;
return false;
}
if (isLabel() || isDebugValue() ||
TID->isTerminator() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
// loaded value doesn't change between the load and its intended destination.
// The check for isInvariantLoad gives the target the chance to classify the
// load as always returning a constant, e.g. a constant pool load.
if (TID->mayLoad() && !isInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
// end of block, or if the load is volatile, we can't move it.
return !SawStore && !hasVolatileMemoryRef();
return true;
}
/// isSafeToReMat - Return true if it's safe to rematerialize the specified
/// instruction which defined the specified register instead of copying it.
bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
AliasAnalysis *AA,
unsigned DstReg) const {
bool SawStore = false;
if (!TII->isTriviallyReMaterializable(this, AA) ||
!isSafeToMove(TII, AA, SawStore))
return false;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg())
continue;
// FIXME: For now, do not remat any instruction with register operands.
// Later on, we can loosen the restriction if the register operands have
// not been modified between the def and use. Note, this is different from
// MachineSink because the code is no longer in two-address form (at least
// partially).
if (MO.isUse())
return false;
else if (!MO.isDead() && MO.getReg() != DstReg)
return false;
}
return true;
}
/// hasVolatileMemoryRef - Return true if this instruction may have a
/// volatile memory reference, or if the information describing the
/// memory reference is not available. Return false if it is known to
/// have no volatile memory references.
bool MachineInstr::hasVolatileMemoryRef() const {
// An instruction known never to access memory won't have a volatile access.
if (!TID->mayStore() &&
!TID->mayLoad() &&
!TID->isCall() &&
!hasUnmodeledSideEffects())
return false;
// Otherwise, if the instruction has no memory reference information,
// conservatively assume it wasn't preserved.
if (memoperands_empty())
return true;
// Check the memory reference information for volatile references.
for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
if ((*I)->isVolatile())
return true;
return false;
}
/// isInvariantLoad - Return true if this instruction is loading from a
/// location whose value is invariant across the function. For example,
/// loading a value from the constant pool or from the argument area
/// of a function if it does not change. This should only return true if
/// *all* loads the instruction does are invariant (if it does multiple loads).
bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
// If the instruction doesn't load at all, it isn't an invariant load.
if (!TID->mayLoad())
return false;
// If the instruction has lost its memoperands, conservatively assume that
// it may not be an invariant load.
if (memoperands_empty())
return false;
const MachineFrameInfo *MFI = getParent()->getParent()->getFrameInfo();
for (mmo_iterator I = memoperands_begin(),
E = memoperands_end(); I != E; ++I) {
if ((*I)->isVolatile()) return false;
if ((*I)->isStore()) return false;
if (const Value *V = (*I)->getValue()) {
// A load from a constant PseudoSourceValue is invariant.
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
if (PSV->isConstant(MFI))
continue;
// If we have an AliasAnalysis, ask it whether the memory is constant.
if (AA && AA->pointsToConstantMemory(
AliasAnalysis::Location(V, (*I)->getSize(),
(*I)->getTBAAInfo())))
continue;
}
// Otherwise assume conservatively.
return false;
}
// Everything checks out.
return true;
}
/// isConstantValuePHI - If the specified instruction is a PHI that always
/// merges together the same virtual register, return the register, otherwise
/// return 0.
unsigned MachineInstr::isConstantValuePHI() const {
if (!isPHI())
return 0;
assert(getNumOperands() >= 3 &&
"It's illegal to have a PHI without source operands");
unsigned Reg = getOperand(1).getReg();
for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
if (getOperand(i).getReg() != Reg)
return 0;
return Reg;
}
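/// hasUnmodeledSideEffects - Return true if this instruction has side effects
/// not captured by its operand list, including [sideeffect] inline asm.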
bool MachineInstr::hasUnmodeledSideEffects() const {
if (getDesc().hasUnmodeledSideEffects())
return true;
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
return true;
}
return false;
}
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool MachineInstr::allDefsAreDead() const {
for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || MO.isUse())
continue;
if (!MO.isDead())
return false;
}
return true;
}
/// copyImplicitOps - Copy implicit register operands from specified
/// instruction to this instruction.
void MachineInstr::copyImplicitOps(const MachineInstr *MI) {
for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isImplicit())
addOperand(MO);
}
}
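/// dump - Print this instruction to dbgs(), for use from a debugger.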
void MachineInstr::dump() const {
dbgs() << "  " << *this;
}
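// printDebugLoc - Print DL's file:line[:col] source location, recursing into
// any inlined-at locations.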
static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
raw_ostream &CommentOS) {
const LLVMContext &Ctx = MF->getFunction()->getContext();
if (!DL.isUnknown()) { // Print source line info.
DIScope Scope(DL.getScope(Ctx));
// Omit the directory, because it's likely to be long and uninteresting.
if (Scope.Verify())
CommentOS << Scope.getFilename();
else
CommentOS << "<unknown>";
CommentOS << ':' << DL.getLine();
if (DL.getCol() != 0)
CommentOS << ':' << DL.getCol();
DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
if (!InlinedAtDL.isUnknown()) {
CommentOS << " @[ ";
printDebugLoc(InlinedAtDL, MF, CommentOS);
CommentOS << " ]";
}
}
}
void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
// We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
const MachineFunction *MF = 0;
const MachineRegisterInfo *MRI = 0;
if (const MachineBasicBlock *MBB = getParent()) {
MF = MBB->getParent();
if (!TM && MF)
TM = &MF->getTarget();
if (MF)
MRI = &MF->getRegInfo();
}
// Save a list of virtual registers.
SmallVector<unsigned, 8> VirtRegs;
// Print explicitly defined operands on the left of an assignment syntax.
unsigned StartOp = 0, e = getNumOperands();
for (; StartOp < e && getOperand(StartOp).isReg() &&
getOperand(StartOp).isDef() &&
!getOperand(StartOp).isImplicit();
++StartOp) {
if (StartOp != 0) OS << ", ";
getOperand(StartOp).print(OS, TM);
unsigned Reg = getOperand(StartOp).getReg();
if (Reg && TargetRegisterInfo::isVirtualRegister(Reg))
VirtRegs.push_back(Reg);
}
if (StartOp != 0)
OS << " = ";
// Print the opcode name.
OS << getDesc().getName();
// Print the rest of the operands.
bool OmittedAnyCallClobbers = false;
bool FirstOp = true;
if (isInlineAsm()) {
// Print asm string.
OS << " ";
getOperand(InlineAsm::MIOp_AsmString).print(OS, TM);
// Print HasSideEffects, IsAlignStack
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
OS << " [sideeffect]";
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
OS << " [alignstack]";
StartOp = InlineAsm::MIOp_FirstOperand;
FirstOp = false;
}
for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (MO.isReg() && MO.getReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
VirtRegs.push_back(MO.getReg());
// Omit call-clobbered registers which aren't used anywhere. This makes
// call instructions much less noisy on targets where calls clobber lots
// of registers. Don't rely on MO.isDead() because we may be called before
// LiveVariables is run, or we may be looking at a non-allocatable reg.
if (MF && getDesc().isCall() &&
MO.isReg() && MO.isImplicit() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg)) {
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (MRI.use_empty(Reg) && !MRI.isLiveOut(Reg)) {
bool HasAliasLive = false;
for (const unsigned *Alias = TM->getRegisterInfo()->getAliasSet(Reg);
unsigned AliasReg = *Alias; ++Alias)
if (!MRI.use_empty(AliasReg) || MRI.isLiveOut(AliasReg)) {
HasAliasLive = true;
break;
}
if (!HasAliasLive) {
OmittedAnyCallClobbers = true;
continue;
}
}
}
}
if (FirstOp) FirstOp = false; else OS << ",";
OS << " ";
if (i < getDesc().NumOperands) {
const TargetOperandInfo &TOI = getDesc().OpInfo[i];
if (TOI.isPredicate())
OS << "pred:";
if (TOI.isOptionalDef())
OS << "opt:";
}
if (isDebugValue() && MO.isMetadata()) {
// Pretty print DBG_VALUE instructions.
const MDNode *MD = MO.getMetadata();
if (const MDString *MDS = dyn_cast<MDString>(MD->getOperand(2)))
OS << "!\"" << MDS->getString() << '\"';
else
MO.print(OS, TM);
} else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
} else
MO.print(OS, TM);
}
// Briefly indicate whether any call clobbers were omitted.
if (OmittedAnyCallClobbers) {
OS << " ...";
if (!memoperands_empty()) {
if (!HaveSemi) OS << ";"; HaveSemi = true;
OS << " mem:";
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
OS << **i;
if (llvm::next(i) != e)
OS << " ";
}
}
// Print the regclass of any virtual registers encountered.
if (MRI && !VirtRegs.empty()) {
if (!HaveSemi) OS << ";"; HaveSemi = true;
for (unsigned i = 0; i != VirtRegs.size(); ++i) {
const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
OS << " " << RC->getName() << ":%reg" << VirtRegs[i];
for (unsigned j = i+1; j != VirtRegs.size();) {
if (MRI->getRegClass(VirtRegs[j]) != RC) {
++j;
continue;
}
if (VirtRegs[i] != VirtRegs[j])
OS << "," << VirtRegs[j];
VirtRegs.erase(VirtRegs.begin()+j);
}
}
}
if (!debugLoc.isUnknown() && MF) {
if (!HaveSemi) OS << ";";
OS << " dbg:";
printDebugLoc(debugLoc, MF, OS);
}
OS << "\n";
}
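/// addRegisterKilled - We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true, add
/// an implicit operand if it's not found. Returns true if the operand exists
/// or is added.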
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
bool hasAliases = isPhysReg && RegInfo->getAliasSet(IncomingReg);
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (Reg == IncomingReg) {
if (!Found) {
if (MO.isKill())
// The register is already marked kill.
return true;
if (isPhysReg && isRegTiedToDefOperand(i))
// Two-address uses of physregs must not be marked kill.
return true;
MO.setIsKill();
Found = true;
}
} else if (hasAliases && MO.isKill() &&
TargetRegisterInfo::isPhysicalRegister(Reg)) {
// A super-register kill already exists.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
if (RegInfo->isSubRegister(IncomingReg, Reg))
DeadOps.push_back(i);
}
}
// Trim unneeded kill operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit())
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsKill(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is killed. Add a
// new implicit operand if required.
if (!Found && AddIfNotFound) {
addOperand(MachineOperand::CreateReg(IncomingReg,
false /*IsDef*/,
true /*IsImp*/,
true /*IsKill*/));
return true;
}
return Found;
}
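/// addRegisterDead - We have determined MI defined a register without a use.
/// Look for the operand that defines it and mark it as IsDead. If
/// AddIfNotFound is true, add an implicit operand if it's not found. Returns
/// true if the operand exists or is added.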
bool MachineInstr::addRegisterDead(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
bool hasAliases = isPhysReg && RegInfo->getAliasSet(IncomingReg);
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (Reg == IncomingReg) {
if (!Found) {
if (MO.isDead())
// The register is already marked dead.
return true;
MO.setIsDead();
Found = true;
}
} else if (hasAliases && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(Reg)) {
// There exists a super-register that's marked dead.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
if (RegInfo->getSubRegisters(IncomingReg) &&
RegInfo->getSuperRegisters(Reg) &&
RegInfo->isSubRegister(IncomingReg, Reg))
DeadOps.push_back(i);
}
}
// Trim unneeded dead operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit())
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsDead(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is dead. Add a
// new implicit operand if required.
if (Found || !AddIfNotFound)
return Found;
addOperand(MachineOperand::CreateReg(IncomingReg,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/,
true /*IsDead*/));
return true;
}
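/// addRegisterDefined - Make sure this instruction has a full def of
/// IncomingReg; if no such operand exists, append an implicit def.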
void MachineInstr::addRegisterDefined(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(IncomingReg)) {
MachineOperand *MO = findRegisterDefOperand(IncomingReg, false, RegInfo);
if (MO)
return;
} else {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (MO.isReg() && MO.getReg() == IncomingReg && MO.isDef() &&
MO.getSubReg() == 0)
return;
}
}
addOperand(MachineOperand::CreateReg(IncomingReg,
true /*IsDef*/,
true /*IsImp*/));
}
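/// setPhysRegsDeadExcept - Mark every register def operand dead unless it
/// overlaps one of the registers in UsedRegs.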
void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
const TargetRegisterInfo &TRI) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
bool Dead = true;
for (SmallVectorImpl<unsigned>::const_iterator I = UsedRegs.begin(),
E = UsedRegs.end(); I != E; ++I)
if (TRI.regsOverlap(*I, Reg)) {
Dead = false;
break;
}
// If there are no uses, including partial uses, the def is dead.
if (Dead) MO.setIsDead();
}
}
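/// getHashValue - Hash an instruction's opcode and operands, skipping virtual
/// register defs so that instructions computing the same value into different
/// vregs hash identically.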
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
unsigned Hash = MI->getOpcode() * 37;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
uint64_t Key = (uint64_t)MO.getType() << 32;
switch (MO.getType()) {
default: break;
case MachineOperand::MO_Register:
if (MO.isDef() && MO.getReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
Key |= MO.getReg();
break;
case MachineOperand::MO_Immediate:
Key |= MO.getImm();
break;
case MachineOperand::MO_FrameIndex:
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_JumpTableIndex:
Key |= MO.getIndex();
break;
case MachineOperand::MO_MachineBasicBlock:
Key |= DenseMapInfo<void*>::getHashValue(MO.getMBB());
break;
case MachineOperand::MO_GlobalAddress:
Key |= DenseMapInfo<void*>::getHashValue(MO.getGlobal());
break;
case MachineOperand::MO_BlockAddress:
Key |= DenseMapInfo<void*>::getHashValue(MO.getBlockAddress());
break;
case MachineOperand::MO_MCSymbol:
Key |= DenseMapInfo<void*>::getHashValue(MO.getMCSymbol());
break;
}
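// Mix the bits so every operand affects the whole hash (a standard 64-bit
// integer avalanche, in the style of Thomas Wang's hash).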
Key += ~(Key << 32);
Key ^= (Key >> 22);
Key += ~(Key << 13);
Key ^= (Key >> 8);
Key += (Key << 3);
Key ^= (Key >> 15);
Key += ~(Key << 27);
Key ^= (Key >> 31);
Hash = (unsigned)Key + Hash * 37;
}
return Hash;
}