case X86::SHUFPSrri: {
assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
if (B != C) return 0;
unsigned A = MI->getOperand(0).getReg();
unsigned M = MI->getOperand(3).getImm();
NewMI = BuildMI(get(X86::PSHUFDri)).addReg(A, true, false, false, isDead)
.addReg(B, false, false, isKill).addImm(M);
break;
}
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
// the flags produced by a shift, so this is safe.
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
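// Shift amounts 1, 2 and 3 map onto the LEA scale factors 2, 4 and 8;
// anything larger cannot be expressed with a single LEA.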
NewMI = BuildMI(get(X86::LEA64r)).addReg(Dest, true, false, false, isDead)
.addReg(0).addImm(1 << ShAmt).addReg(Src, false, false, isKill).addImm(0);
break;
}
case X86::SHL32ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
// the flags produced by a shift, so this is safe.
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
X86::LEA64_32r : X86::LEA32r;
NewMI = BuildMI(get(Opc)).addReg(Dest, true, false, false, isDead)
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, false, false, isKill).addImm(0);
break;
}
case X86::SHL16ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
// the flags produced by a shift, so this is safe.
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
if (DisableLEA16) {
// If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
? X86::LEA64_32r : X86::LEA32r;
unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
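// The sequence generated below is, schematically:
//   leaInReg  = IMPLICIT_DEF
//   leaInReg  = INSERT_SUBREG leaInReg, Src, SUBREG_16BIT
//   leaOutReg = LEA [leaInReg * (1 << ShAmt)]
//   Dest      = EXTRACT_SUBREG leaOutReg, SUBREG_16BIT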
MachineInstr *Undef = BuildMI(get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI = BuildMI(get(X86::INSERT_SUBREG),leaInReg)
.addReg(leaInReg).addReg(Src, false, false, isKill)
.addImm(X86::SUBREG_16BIT);
NewMI = BuildMI(get(Opc), leaOutReg).addReg(0).addImm(1 << ShAmt)
.addReg(leaInReg, false, false, true).addImm(0);
MachineInstr *ExtMI = BuildMI(get(X86::EXTRACT_SUBREG))
.addReg(Dest, true, false, false, isDead)
.addReg(leaOutReg, false, false, true).addImm(X86::SUBREG_16BIT);
MFI->insert(MBBI, Undef);
MFI->insert(MBBI, InsMI); // Insert the insert_subreg
MFI->insert(MBBI, NewMI); // Insert the lea inst
MFI->insert(MBBI, ExtMI); // Insert the extract_subreg
if (LV) {
// Update live variables
LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
if (isKill)
LV->replaceKillInstruction(Src, MI, InsMI);
if (isDead)
LV->replaceKillInstruction(Dest, MI, ExtMI);
}
return ExtMI;
} else {
NewMI = BuildMI(get(X86::LEA16r)).addReg(Dest, true, false, false, isDead)
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, false, false, isKill).addImm(0);
}
break;
}
default: {
// The following opcodes also set the condition code register(s). Only
// convert them to an equivalent LEA if the condition code register defs
// are dead!
if (hasLiveCondCodeDef(MI))
return 0;
bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
switch (MIOpc) {
default: return 0;
case X86::INC64r:
case X86::INC32r: {
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
NewMI = addRegOffset(BuildMI(get(Opc))
.addReg(Dest, true, false, false, isDead),
Src, isKill, 1);
break;
}
case X86::INC16r:
case X86::INC64_16r:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
NewMI = addRegOffset(BuildMI(get(X86::LEA16r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, 1);
break;
case X86::DEC64r:
case X86::DEC32r: {
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
NewMI = addRegOffset(BuildMI(get(Opc))
.addReg(Dest, true, false, false, isDead),
Src, isKill, -1);
break;
}
case X86::DEC16r:
case X86::DEC64_16r:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
NewMI = addRegOffset(BuildMI(get(X86::LEA16r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, -1);
break;
case X86::ADD64rr:
case X86::ADD32rr: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
NewMI = addRegReg(BuildMI(get(Opc))
.addReg(Dest, true, false, false, isDead),
Src, isKill, Src2, isKill2);
if (LV && isKill2)
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
}
case X86::ADD16rr: {
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
NewMI = addRegReg(BuildMI(get(X86::LEA16r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, Src2, isKill2);
if (LV && isKill2)
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
}
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
if (MI->getOperand(2).isImmediate())
NewMI = addRegOffset(BuildMI(get(X86::LEA64r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, MI->getOperand(2).getImm());
break;
case X86::ADD32ri:
case X86::ADD32ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
if (MI->getOperand(2).isImmediate()) {
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = addRegOffset(BuildMI(get(Opc))
.addReg(Dest, true, false, false, isDead),
Src, isKill, MI->getOperand(2).getImm());
}
break;
case X86::ADD16ri:
case X86::ADD16ri8:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
if (MI->getOperand(2).isImmediate())
NewMI = addRegOffset(BuildMI(get(X86::LEA16r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, MI->getOperand(2).getImm());
break;
case X86::SHL16ri:
if (DisableLEA16) return 0;
case X86::SHL32ri:
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
"Unknown shl instruction!");
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
X86AddressMode AM;
AM.Scale = 1 << ShAmt;
AM.IndexReg = Src;
unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
: (MIOpc == X86::SHL32ri
? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
NewMI = addFullAddress(BuildMI(get(Opc))
.addReg(Dest, true, false, false, isDead), AM);
if (isKill)
NewMI->getOperand(3).setIsKill(true);
}
break;
}
}
}
}
if (!NewMI) return 0;
if (LV) { // Update live variables
if (isKill)
LV->replaceKillInstruction(Src, MI, NewMI);
if (isDead)
LV->replaceKillInstruction(Dest, MI, NewMI);
}
MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
switch (MI->getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
unsigned Opc;
unsigned Size;
switch (MI->getOpcode()) {
default: assert(0 && "Unreachable!");
case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
unsigned Amt = MI->getOperand(3).getImm();
unsigned A = MI->getOperand(0).getReg();
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
bool AisDead = MI->getOperand(0).isDead();
bool BisKill = MI->getOperand(1).isKill();
bool CisKill = MI->getOperand(2).isKill();
// If machine instrs are no longer in two-address forms, update
// destination register as well.
if (A == B) {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
"Expecting a two-address instruction!");
A = C;
CisKill = false;
}
return BuildMI(get(Opc)).addReg(A, true, false, false, AisDead)
.addReg(C, false, false, CisKill)
.addReg(B, false, false, BisKill).addImm(Size-Amt);
}
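// Commuting a CMOV swaps its two register sources, so to preserve semantics
// the condition is inverted (e.g. CMOVB <-> CMOVAE) before falling through
// to the common commute code below.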
case X86::CMOVB16rr:
case X86::CMOVB32rr:
case X86::CMOVB64rr:
case X86::CMOVAE16rr:
case X86::CMOVAE32rr:
case X86::CMOVAE64rr:
case X86::CMOVE16rr:
case X86::CMOVE32rr:
case X86::CMOVE64rr:
case X86::CMOVNE16rr:
case X86::CMOVNE32rr:
case X86::CMOVNE64rr:
case X86::CMOVBE16rr:
case X86::CMOVBE32rr:
case X86::CMOVBE64rr:
case X86::CMOVA16rr:
case X86::CMOVA32rr:
case X86::CMOVA64rr:
case X86::CMOVL16rr:
case X86::CMOVL32rr:
case X86::CMOVL64rr:
case X86::CMOVGE16rr:
case X86::CMOVGE32rr:
case X86::CMOVGE64rr:
case X86::CMOVLE16rr:
case X86::CMOVLE32rr:
case X86::CMOVLE64rr:
case X86::CMOVG16rr:
case X86::CMOVG32rr:
case X86::CMOVG64rr:
case X86::CMOVS16rr:
case X86::CMOVS32rr:
case X86::CMOVS64rr:
case X86::CMOVNS16rr:
case X86::CMOVNS32rr:
case X86::CMOVNS64rr:
case X86::CMOVP16rr:
case X86::CMOVP32rr:
case X86::CMOVP64rr:
case X86::CMOVNP16rr:
case X86::CMOVNP32rr:
case X86::CMOVNP64rr: {
unsigned Opc = 0;
switch (MI->getOpcode()) {
default: break;
case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
}
MI->setDesc(get(Opc));
// Fallthrough intended.
}
default:
return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}
}
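/// GetCondFromBranchOpc - Return the X86 condition code corresponding to the
/// given conditional-branch opcode, or X86::COND_INVALID if the opcode is not
/// a conditional branch.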
static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
switch (BrOpc) {
default: return X86::COND_INVALID;
case X86::JE: return X86::COND_E;
case X86::JNE: return X86::COND_NE;
case X86::JL: return X86::COND_L;
case X86::JLE: return X86::COND_LE;
case X86::JG: return X86::COND_G;
case X86::JGE: return X86::COND_GE;
case X86::JB: return X86::COND_B;
case X86::JBE: return X86::COND_BE;
case X86::JA: return X86::COND_A;
case X86::JAE: return X86::COND_AE;
case X86::JS: return X86::COND_S;
case X86::JNS: return X86::COND_NS;
case X86::JP: return X86::COND_P;
case X86::JNP: return X86::COND_NP;
case X86::JO: return X86::COND_O;
case X86::JNO: return X86::COND_NO;
}
}
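/// GetCondBranchFromCond - Return the conditional-branch opcode that tests
/// the given condition code.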
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
switch (CC) {
default: assert(0 && "Illegal condition code!");
case X86::COND_E: return X86::JE;
case X86::COND_NE: return X86::JNE;
case X86::COND_L: return X86::JL;
case X86::COND_LE: return X86::JLE;
case X86::COND_G: return X86::JG;
case X86::COND_GE: return X86::JGE;
case X86::COND_B: return X86::JB;
case X86::COND_BE: return X86::JBE;
case X86::COND_A: return X86::JA;
case X86::COND_AE: return X86::JAE;
case X86::COND_S: return X86::JS;
case X86::COND_NS: return X86::JNS;
case X86::COND_P: return X86::JP;
case X86::COND_NP: return X86::JNP;
case X86::COND_O: return X86::JO;
case X86::COND_NO: return X86::JNO;
}
}
/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
switch (CC) {
default: assert(0 && "Illegal condition code!");
case X86::COND_E: return X86::COND_NE;
case X86::COND_NE: return X86::COND_E;
case X86::COND_L: return X86::COND_GE;
case X86::COND_LE: return X86::COND_G;
case X86::COND_G: return X86::COND_LE;
case X86::COND_GE: return X86::COND_L;
case X86::COND_B: return X86::COND_AE;
case X86::COND_BE: return X86::COND_A;
case X86::COND_A: return X86::COND_BE;
case X86::COND_AE: return X86::COND_B;
case X86::COND_S: return X86::COND_NS;
case X86::COND_NS: return X86::COND_S;
case X86::COND_P: return X86::COND_NP;
case X86::COND_NP: return X86::COND_P;
case X86::COND_O: return X86::COND_NO;
case X86::COND_NO: return X86::COND_O;
}
}
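/// isUnpredicatedTerminator - Return true if MI is a terminator instruction
/// whose execution is not guarded by a predicate; conditional branches are
/// always treated as unpredicated terminators here.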
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
return true;
if (!TID.isPredicable())
return true;
return !isPredicated(MI);
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
const X86InstrInfo &TII) {
if (MI->getOpcode() == X86::FP_REG_KILL)
return false;
return TII.isUnpredicatedTerminator(MI);
}
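// AnalyzeBranch - Analyze the terminators of MBB. Returns false on success,
// filling in TBB/FBB and Cond to describe the branch structure; returns true
// if the block ends in a branch pattern this code cannot handle.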
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
std::vector<MachineOperand> &Cond) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
if (!LastInst->getDesc().isBranch())
return true;
// If the block ends with a branch there are 3 possibilities:
// it's an unconditional, conditional, or indirect branch.
if (LastInst->getOpcode() == X86::JMP) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
if (BranchCode == X86::COND_INVALID)
return true; // Can't handle indirect branch.
// Otherwise, block ends with fall-through condbranch.
TBB = LastInst->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
return false;
}
// Get the instruction before it if it's a terminator.
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() &&
isBrAnalysisUnpredicatedTerminator(--I, *this))
return true;
// If the block ends with X86::JMP and a conditional branch, handle it.
X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
FBB = LastInst->getOperand(0).getMBB();
return false;
}
// If the block ends with two X86::JMPs, handle it. The second one is not
// executed, so remove it.
if (SecondLastInst->getOpcode() == X86::JMP &&
LastInst->getOpcode() == X86::JMP) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
I->eraseFromParent();
return false;
}
// Otherwise, can't handle this.
return true;
}
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
--I;
if (I->getOpcode() != X86::JMP &&
GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
return 0;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
return 1;
// Remove the branch.
I->eraseFromParent();
return 2;
}
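// X86InstrAddOperand - Append an existing MachineOperand to the instruction
// being built, dispatching on the operand's type.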
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
MachineOperand &MO) {
if (MO.isRegister())
MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
MO.isKill(), MO.isDead(), MO.getSubReg());
else if (MO.isImmediate())
MIB = MIB.addImm(MO.getImm());
else if (MO.isFrameIndex())
MIB = MIB.addFrameIndex(MO.getIndex());
else if (MO.isGlobalAddress())
MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
else if (MO.isConstantPoolIndex())
MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
else if (MO.isJumpTableIndex())
MIB = MIB.addJumpTableIndex(MO.getIndex());
else if (MO.isExternalSymbol())
MIB = MIB.addExternalSymbol(MO.getSymbolName());
else
assert(0 && "Unknown operand for X86InstrAddOperand!");
return MIB;
}
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const std::vector<MachineOperand> &Cond) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
"X86 branch conditions have one component!");
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch?
BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
} else {
// Conditional branch.
unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
BuildMI(&MBB, get(Opc)).addMBB(TBB);
}
return 1;
}
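// Two-way conditional branch: emit the conditional jump to TBB followed by
// an unconditional jump to FBB.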
unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
BuildMI(&MBB, get(Opc)).addMBB(TBB);
BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
return 2;
}
void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
if (DestRC == SrcRC) {
unsigned Opc;
if (DestRC == &X86::GR64RegClass) {
Opc = X86::MOV64rr;
} else if (DestRC == &X86::GR32RegClass) {
Opc = X86::MOV32rr;
} else if (DestRC == &X86::GR16RegClass) {
Opc = X86::MOV16rr;
} else if (DestRC == &X86::GR8RegClass) {
Opc = X86::MOV8rr;
} else if (DestRC == &X86::GR32_RegClass) {
Opc = X86::MOV32_rr;
} else if (DestRC == &X86::GR16_RegClass) {
Opc = X86::MOV16_rr;
} else if (DestRC == &X86::RFP32RegClass) {
Opc = X86::MOV_Fp3232;
} else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
Opc = X86::MOV_Fp6464;
} else if (DestRC == &X86::RFP80RegClass) {
Opc = X86::MOV_Fp8080;
} else if (DestRC == &X86::FR32RegClass) {
Opc = X86::FsMOVAPSrr;
} else if (DestRC == &X86::FR64RegClass) {
Opc = X86::FsMOVAPDrr;
} else if (DestRC == &X86::VR128RegClass) {
Opc = X86::MOVAPSrr;
} else if (DestRC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64rr;
} else {
assert(0 && "Unknown regclass");
abort();
}
BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
return;
}
// Moving EFLAGS to / from another register requires a push and a pop.
if (SrcRC == &X86::CCRRegClass) {
assert(SrcReg == X86::EFLAGS);
if (DestRC == &X86::GR64RegClass) {
BuildMI(MBB, MI, get(X86::PUSHFQ));
BuildMI(MBB, MI, get(X86::POP64r), DestReg);
return;
} else if (DestRC == &X86::GR32RegClass) {
BuildMI(MBB, MI, get(X86::PUSHFD));
BuildMI(MBB, MI, get(X86::POP32r), DestReg);
return;
}
} else if (DestRC == &X86::CCRRegClass) {
assert(DestReg == X86::EFLAGS);
if (SrcRC == &X86::GR64RegClass) {
BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
BuildMI(MBB, MI, get(X86::POPFQ));
return;
} else if (SrcRC == &X86::GR32RegClass) {
BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
BuildMI(MBB, MI, get(X86::POPFD));
return;
}
}
// Moving from ST(0) turns into FpGET_ST0_32 etc.
if (SrcRC == &X86::RSTRegClass) {
// Copying from ST(0)/ST(1).
assert((SrcReg == X86::ST0 || SrcReg == X86::ST1) &&
"Can only copy from ST(0)/ST(1) right now");
bool isST0 = SrcReg == X86::ST0;
unsigned Opc;
if (DestRC == &X86::RFP32RegClass)
Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
else if (DestRC == &X86::RFP64RegClass)
Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
else {
assert(DestRC == &X86::RFP80RegClass);
Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
}
BuildMI(MBB, MI, get(Opc), DestReg);
return;
}
// Moving to ST(0) turns into FpSET_ST0_32 etc.
if (DestRC == &X86::RSTRegClass) {
// Copying to ST(0). FIXME: handle ST(1) also
assert(DestReg == X86::ST0 && "Can only copy to TOS right now");
unsigned Opc;
if (SrcRC == &X86::RFP32RegClass)
Opc = X86::FpSET_ST0_32;
else if (SrcRC == &X86::RFP64RegClass)
Opc = X86::FpSET_ST0_64;
else {
assert(SrcRC == &X86::RFP80RegClass);
Opc = X86::FpSET_ST0_80;
}
BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
return;
}
assert(0 && "Not yet supported!");
}
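// getStoreRegOpcode - Return the opcode of the plain store instruction for
// the given register class. Vector spills use unaligned stores unless the
// stack is known to be 16-byte aligned.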
static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
unsigned StackAlign) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64mr;
} else if (RC == &X86::GR32RegClass) {
Opc = X86::MOV32mr;
} else if (RC == &X86::GR16RegClass) {
Opc = X86::MOV16mr;
} else if (RC == &X86::GR8RegClass) {
Opc = X86::MOV8mr;
} else if (RC == &X86::GR32_RegClass) {
Opc = X86::MOV32_mr;
} else if (RC == &X86::GR16_RegClass) {
Opc = X86::MOV16_mr;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::ST_FpP80m; // pops
} else if (RC == &X86::RFP64RegClass) {
Opc = X86::ST_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
Opc = X86::ST_Fp32m;
} else if (RC == &X86::FR32RegClass) {
Opc = X86::MOVSSmr;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDmr;
} else if (RC == &X86::VR128RegClass) {
// FIXME: Use movaps once we are capable of selectively
// aligning functions that spill SSE registers on 16-byte boundaries.
Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64mr;
} else {
assert(0 && "Unknown regclass");
abort();
}
return Opc;
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC) const {
unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
.addReg(SrcReg, false, false, isKill);
}
void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
MachineInstrBuilder MIB = BuildMI(get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB = X86InstrAddOperand(MIB, Addr[i]);
MIB.addReg(SrcReg, false, false, isKill);
NewMIs.push_back(MIB);
}
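// getLoadRegOpcode - Return the opcode of the plain load (reload) instruction
// for the given register class, mirroring getStoreRegOpcode above.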
static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
unsigned StackAlign) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64rm;
} else if (RC == &X86::GR32RegClass) {
Opc = X86::MOV32rm;
} else if (RC == &X86::GR16RegClass) {
Opc = X86::MOV16rm;
} else if (RC == &X86::GR8RegClass) {
Opc = X86::MOV8rm;
} else if (RC == &X86::GR32_RegClass) {
Opc = X86::MOV32_rm;
} else if (RC == &X86::GR16_RegClass) {
Opc = X86::MOV16_rm;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::LD_Fp80m;
} else if (RC == &X86::RFP64RegClass) {
Opc = X86::LD_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
Opc = X86::LD_Fp32m;
} else if (RC == &X86::FR32RegClass) {
Opc = X86::MOVSSrm;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDrm;
} else if (RC == &X86::VR128RegClass) {
// FIXME: Use movaps once we are capable of selectively
// aligning functions that spill SSE registers on 16-byte boundaries.
Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64rm;
} else {
assert(0 && "Unknown regclass");
abort();
}
return Opc;
}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC) const{
unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}
void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB = X86InstrAddOperand(MIB, Addr[i]);
NewMIs.push_back(MIB);
}
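// Callee-saved registers are spilled with PUSH and restored with POP rather
// than with frame-indexed stores/loads; the total size is recorded in
// X86MachineFunctionInfo so frame lowering can account for it.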
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
if (CSI.empty())
return false;
bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
unsigned SlotSize = is64Bit ? 8 : 4;
MachineFunction &MF = *MBB.getParent();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);
unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
BuildMI(MBB, MI, get(Opc)).addReg(Reg);
}
return true;
}
bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
if (CSI.empty())
return false;
bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
BuildMI(MBB, MI, get(Opc), Reg);
}
return true;
}
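// FuseTwoAddrInst - Rebuild MI as a memory-operand instruction with the given
// opcode, replacing the tied def/use register pair with the address in MOs.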
static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI, const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
MachineInstrBuilder MIB(NewMI);
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB = X86InstrAddOperand(MIB, MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
MIB.addImm(1).addReg(0).addImm(0);
// Loop over the rest of the ri operands, converting them over.
unsigned NumOps = MI->getDesc().getNumOperands()-2;
for (unsigned i = 0; i != NumOps; ++i) {
MachineOperand &MO = MI->getOperand(i+2);
MIB = X86InstrAddOperand(MIB, MO);
}
for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
MIB = X86InstrAddOperand(MIB, MO);
}
return MIB;
}
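// FuseInst - Rebuild MI with the given opcode, substituting the address in
// MOs for register operand OpNo and copying all other operands unchanged.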
static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI, const TargetInstrInfo &TII) {
MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
MachineInstrBuilder MIB(NewMI);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
assert(MO.isRegister() && "Expected to fold into reg operand!");
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB = X86InstrAddOperand(MIB, MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
MIB.addImm(1).addReg(0).addImm(0);
} else {
MIB = X86InstrAddOperand(MIB, MO);
}
}
return MIB;
}
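// MakeM0Inst - Build a "store immediate 0" to the given address; used when
// folding the MOV*r0 pseudo-instructions.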
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI) {
MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB = X86InstrAddOperand(MIB, MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
MIB.addImm(1).addReg(0).addImm(0);
return MIB.addImm(0);
}
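// foldMemoryOperand - Try to fold the address described by MOs into operand
// 'i' of MI, returning a new memory-form instruction or NULL if no fold is
// known for this opcode/operand combination.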
MachineInstr*
X86InstrInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
SmallVector<MachineOperand,4> &MOs) const {
const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
bool isTwoAddrFold = false;
unsigned NumOps = MI->getDesc().getNumOperands();
bool isTwoAddr = NumOps > 1 &&
MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;
MachineInstr *NewMI = NULL;
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && i < 2 &&
MI->getOperand(0).isRegister() &&
MI->getOperand(1).isRegister() &&
MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (i == 0) { // If operand 0
if (MI->getOpcode() == X86::MOV16r0)
NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
else if (MI->getOpcode() == X86::MOV32r0)
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
else if (MI->getOpcode() == X86::MOV64r0)
NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
else if (MI->getOpcode() == X86::MOV8r0)
NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
if (NewMI)
return NewMI;
OpcodeTablePtr = &RegOp2MemOpTable0;
} else if (i == 1) {
OpcodeTablePtr = &RegOp2MemOpTable1;
} else if (i == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
}
// If table selected...
if (OpcodeTablePtr) {
// Find the Opcode to fuse
DenseMap<unsigned*, unsigned>::iterator I =
OpcodeTablePtr->find((unsigned*)MI->getOpcode());
if (I != OpcodeTablePtr->end()) {
if (isTwoAddrFold)
NewMI = FuseTwoAddrInst(I->second, MOs, MI, *this);
else
NewMI = FuseInst(I->second, i, MOs, MI, *this);
return NewMI;
}
}
// No fusion
if (PrintFailedFusing)
cerr << "We failed to fuse operand " << i << *MI;
return NULL;
}
MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
// Check switch flag
if (NoFusing) return NULL;
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
// FIXME: Move alignment requirement into tables?
if (Alignment < 16) {
switch (MI->getOpcode()) {
default: break;
// Not always safe to fold movsd into these instructions since their load
// folding variants expect the address to be 16 byte aligned.
case X86::FsANDNPDrr:
case X86::FsANDNPSrr:
case X86::FsANDPDrr:
case X86::FsANDPSrr:
case X86::FsORPDrr:
case X86::FsORPSrr:
case X86::FsXORPDrr:
case X86::FsXORPSrr:
return NULL;
}