case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
case X86ISD::Wrapper: return "X86ISD::Wrapper";
case X86ISD::S2VEC: return "X86ISD::S2VEC";
case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
// X86 allows a sign-extended 32-bit immediate field.
return (V > -(1LL << 32) && V < (1LL << 32)-1);
}
bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
// In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement
// field unless we are in small code model.
if (Subtarget->is64Bit() &&
getTargetMachine().getCodeModel() != CodeModel::Small)
return false;
return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false));
}
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(VT) == 64) return false;
return (Mask.Val->getNumOperands() <= 4 ||
isSplatMask(Mask.Val) ||
isPSHUFHW_PSHUFLWMask(Mask.Val) ||
X86::isUNPCKLMask(Mask.Val) ||
X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
X86::isUNPCKHMask(Mask.Val));
}
bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
MVT::ValueType EVT,
SelectionDAG &DAG) const {
unsigned NumElts = BVOps.size();
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
if (NumElts == 2) return true;
if (NumElts == 4) {
return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
}
return false;
}
//===----------------------------------------------------------------------===//
// X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *BB) {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64: {
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
// true/false values to select between, and a branch opcode to use.
const BasicBlock *LLVM_BB = BB->getBasicBlock();
ilist<MachineBasicBlock>::iterator It = BB;
++It;
// thisMBB:
// ...
// TrueVal = ...
// cmpTY ccX, r1, r2
// bCC copy1MBB
// fallthrough --> copy0MBB
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
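// Operand 3 of the CMOV pseudo holds the X86 condition code; map it to the
// corresponding conditional-branch opcode for the diamond.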
unsigned Opc =
X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
// Update machine-CFG edges by first adding all successors of the current
// block to the new block which will contain the Phi node for the select.
for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
e = BB->succ_end(); i != e; ++i)
sinkMBB->addSuccessor(*i);
// Next, remove all successors of the current block, and add the true
// and fallthrough blocks as its successors.
while(!BB->succ_empty())
BB->removeSuccessor(BB->succ_begin());
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
BB = copy0MBB;
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
delete MI; // The pseudo instruction is gone now.
return BB;
}
case X86::FP_TO_INT16_IN_MEM:
case X86::FP_TO_INT32_IN_MEM:
case X86::FP_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
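// The 2-byte, 2-byte-aligned stack slot holds the 16-bit FPU control word;
// FNSTCW/FLDCW below spill and reload it through this slot.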
addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned OldCW =
F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);
// Set the high part to be round to zero...
addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
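// 0xC7F sets the rounding-control bits (10-11) to 11b, i.e. truncate toward
// zero, and keeps all exception mask bits (0-5) set.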
// Reload the modified control word now...
addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of control word to original value
addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
unsigned Opc;
switch (MI->getOpcode()) {
default: assert(0 && "illegal opcode!");
case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
}
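// Operands 0-3 of the pseudo describe the address (base register or frame
// index, scale, index, displacement or global) and operand 4 is the FP value
// to store; rebuild them as an X86AddressMode for the real store below.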
X86AddressMode AM;
MachineOperand &Op = MI->getOperand(0);
if (Op.isRegister()) {
AM.BaseType = X86AddressMode::RegBase;
AM.Base.Reg = Op.getReg();
} else {
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = Op.getFrameIndex();
}
Op = MI->getOperand(1);
if (Op.isImmediate())
AM.Scale = Op.getImm();
Op = MI->getOperand(2);
if (Op.isImmediate())
AM.IndexReg = Op.getImm();
Op = MI->getOperand(3);
if (Op.isGlobalAddress()) {
AM.GV = Op.getGlobal();
} else {
AM.Disp = Op.getImm();
}
addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
.addReg(MI->getOperand(4).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
delete MI; // The pseudo instruction is gone now.
return BB;
}
}
}
//===----------------------------------------------------------------------===//
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
unsigned Depth) const {
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
Opc == ISD::INTRINSIC_W_CHAIN ||
Opc == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
KnownZero = KnownOne = 0; // Don't know anything.
switch (Opc) {
default: break;
case X86ISD::SETCC:
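// SETCC produces 0 or 1, so every bit above bit 0 is known to be zero.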
KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
break;
}
}
/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
MVT::ValueType VT = N->getValueType(0);
SDOperand PermMask = N->getOperand(2);
unsigned NumElems = PermMask.getNumOperands();
SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
i %= NumElems;
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return (i == 0)
? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
} else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF)
return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
}
return SDOperand();
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
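/// e.g. (add (X86ISD::Wrapper (TargetGlobalAddress @G)), 16) yields GA = @G
/// and adds 16 to Offset.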
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
unsigned Opc = N->getOpcode();
if (Opc == X86ISD::Wrapper) {
if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
return true;
}
} else if (Opc == ISD::ADD) {
SDOperand N1 = N->getOperand(0);
SDOperand N2 = N->getOperand(1);
if (isGAPlusOffset(N1.Val, GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
if (V) {
Offset += V->getSignExtended();
return true;
}
} else if (isGAPlusOffset(N2.Val, GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
if (V) {
Offset += V->getSignExtended();
return true;
}
}
}
return false;
}
/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
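/// e.g. with Size = 4 and Dist = 2, N must load from Base's address plus 8
/// bytes.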
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
MachineFrameInfo *MFI) {
if (N->getOperand(0).Val != Base->getOperand(0).Val)
return false;
SDOperand Loc = N->getOperand(1);
SDOperand BaseLoc = Base->getOperand(1);
if (Loc.getOpcode() == ISD::FrameIndex) {
if (BaseLoc.getOpcode() != ISD::FrameIndex)
return false;
int FI = dyn_cast<FrameIndexSDNode>(Loc)->getIndex();
int BFI = dyn_cast<FrameIndexSDNode>(BaseLoc)->getIndex();
int FS = MFI->getObjectSize(FI);
int BFS = MFI->getObjectSize(BFI);
if (FS != BFS || FS != Size) return false;
return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
} else {
GlobalValue *GV1 = NULL;
GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Size);
}
return false;
}
static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
const X86Subtarget *Subtarget) {
GlobalValue *GV;
int64_t Offset;
if (isGAPlusOffset(Base, GV, Offset))
return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
else {
assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
int BFI = dyn_cast<FrameIndexSDNode>(Base)->getIndex();
if (BFI < 0)
// Fixed objects do not specify alignment, however the offsets are known.
return ((Subtarget->getStackAlignment() % 16) == 0 &&
(MFI->getObjectOffset(BFI) % 16) == 0);
else
return MFI->getObjectAlignment(BFI) >= 16;
}
return false;
}
/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
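/// For example, four consecutive f32 loads from A, A+4, A+8 and A+12 shuffled
/// into lanes <0, 1, 2, 3> become one 16-byte load: an ordinary vector load if
/// the base is 16-byte aligned, otherwise an unaligned X86ISD::LOAD_UA.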
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT::ValueType VT = N->getValueType(0);
MVT::ValueType EVT = MVT::getVectorBaseType(VT);
SDOperand PermMask = N->getOperand(2);
int NumElems = (int)PermMask.getNumOperands();
SDNode *Base = NULL;
for (int i = 0; i < NumElems; ++i) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF) {
if (!Base) return SDOperand();
} else {
SDOperand Arg =
getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
return SDOperand();
if (!Base)
Base = Arg.Val;
else if (!isConsecutiveLoad(Arg.Val, Base,
i, MVT::getSizeInBits(EVT)/8,MFI))
return SDOperand();
}
}
bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
if (isAlign16) {
LoadSDNode *LD = cast<LoadSDNode>(Base);
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
LD->getSrcValueOffset());
} else {
// Just use movups, it's shorter.
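// X86ISD::LOAD_UA is the unaligned vector-load node; the bit_convert below
// restores the shuffle's original vector type.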
std::vector<MVT::ValueType> Tys;
Tys.push_back(MVT::v4f32);
Tys.push_back(MVT::Other);
SmallVector<SDOperand, 3> Ops;
Ops.push_back(Base->getOperand(0));
Ops.push_back(Base->getOperand(1));
Ops.push_back(Base->getOperand(2));
return DAG.getNode(ISD::BIT_CONVERT, VT,
DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
}
}
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
SDOperand Cond = N->getOperand(0);
// If we have SSE[12] support, try to form min/max nodes.
if (Subtarget->hasSSE2() &&
(N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
if (Cond.getOpcode() == ISD::SETCC) {
// Get the LHS/RHS of the select.
SDOperand LHS = N->getOperand(1);
SDOperand RHS = N->getOperand(2);
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
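// Only some compare codes map onto minss/maxss without changing behavior for
// NaNs and signed zeros; the remaining forms are guarded by -enable-unsafe-fp-math.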
if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
switch (CC) {
default: break;
case ISD::SETOLE: // (X <= Y) ? X : Y -> min
case ISD::SETULE:
case ISD::SETLE:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
case ISD::SETLT:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOGT: // (X > Y) ? X : Y -> max
case ISD::SETUGT:
case ISD::SETGT:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
case ISD::SETGE:
Opcode = X86ISD::FMAX;
break;
}
} else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
switch (CC) {
default: break;
case ISD::SETOGT: // (X > Y) ? Y : X -> min
case ISD::SETUGT:
case ISD::SETGT:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
case ISD::SETGE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOLE: // (X <= Y) ? Y : X -> max
case ISD::SETULE:
case ISD::SETLE:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
case ISD::SETLT:
Opcode = X86ISD::FMAX;
break;
}
}
if (Opcode)
return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
}
}
return SDOperand();
}
SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::VECTOR_SHUFFLE:
return PerformShuffleCombine(N, DAG, Subtarget);
case ISD::SELECT:
return PerformSELECTCombine(N, DAG, Subtarget);
}
return SDOperand();
}
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(char ConstraintLetter) const {
switch (ConstraintLetter) {
case 'A':
case 'r':
case 'R':
case 'l':
case 'q':
case 'Q':
case 'x':
case 'Y':
return C_RegisterClass;
default: return TargetLowering::getConstraintType(ConstraintLetter);
}
}
/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand X86TargetLowering::
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
switch (Constraint) {
default: break;
case 'i':
// Literal immediates are always ok.
if (isa<ConstantSDNode>(Op)) return Op;
// If we are in non-pic codegen mode, we allow the address of a global to
// be used with 'i'.
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
return SDOperand(0, 0);
if (GA->getOpcode() != ISD::TargetGlobalAddress)
Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
GA->getOffset());
return Op;
}
// Otherwise, not valid for this mode.
return SDOperand(0, 0);
}
return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
// FIXME: not handling MMX registers yet ('y' constraint).
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
case 'A': // EAX/EDX
if (VT == MVT::i32 || VT == MVT::i64)
return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
break;
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
if (VT == MVT::i64 && Subtarget->is64Bit())
return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
X86::RSI, X86::RDI, X86::RBP, X86::RSP,
X86::R8, X86::R9, X86::R10, X86::R11,
X86::R12, X86::R13, X86::R14, X86::R15, 0);
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
X86::SI, X86::DI, X86::BP, X86::SP, 0);
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
case 'l': // INDEX_REGS
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
X86::ESI, X86::EDI, X86::EBP, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
X86::SI, X86::DI, X86::BP, 0);
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
case 'x': // SSE_REGS if SSE1 allowed
if (Subtarget->hasSSE1())
return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
0);
return std::vector<unsigned>();
case 'Y': // SSE_REGS if SSE2 allowed
if (Subtarget->hasSSE2())
return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
0);
return std::vector<unsigned>();
}
}
return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// Not found as a standard register?
if (Res.second == 0) {
// GCC calls "st(0)" just plain "st".
if (StringsEqualNoCase("{st}", Constraint)) {
Res.first = X86::ST0;
Res.second = X86::RSTRegisterClass;
}
return Res;
}
// Otherwise, check to see if this is a register class of the wrong value
// type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
// turn into {ax},{dx}.
if (Res.second->hasType(VT))
return Res; // Correct type already, nothing to do.
// All of the single-register GCC register classes map their values onto
// 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
// really want an 8-bit or 32-bit register, map to the appropriate register
// class and return the appropriate register.
if (Res.second != X86::GR16RegisterClass)
return Res;
if (VT == MVT::i8) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::AL; break;
case X86::DX: DestReg = X86::DL; break;
case X86::CX: DestReg = X86::CL; break;
case X86::BX: DestReg = X86::BL; break;
}
if (DestReg) {
Res.first = DestReg;
Res.second = X86::GR8RegisterClass;
}
} else if (VT == MVT::i32) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::EAX; break;
case X86::DX: DestReg = X86::EDX; break;
case X86::CX: DestReg = X86::ECX; break;
case X86::BX: DestReg = X86::EBX; break;
case X86::SI: DestReg = X86::ESI; break;
case X86::DI: DestReg = X86::EDI; break;
case X86::BP: DestReg = X86::EBP; break;
case X86::SP: DestReg = X86::ESP; break;
}
if (DestReg) {
Res.first = DestReg;
Res.second = X86::GR32RegisterClass;
}
} else if (VT == MVT::i64) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::RAX; break;
case X86::DX: DestReg = X86::RDX; break;
case X86::CX: DestReg = X86::RCX; break;
case X86::BX: DestReg = X86::RBX; break;
case X86::SI: DestReg = X86::RSI; break;
case X86::DI: DestReg = X86::RDI; break;
case X86::BP: DestReg = X86::RBP; break;
case X86::SP: DestReg = X86::RSP; break;
}
if (DestReg) {
Res.first = DestReg;
Res.second = X86::GR64RegisterClass;
}
}
return Res;
}