"llvm/lib/Target/git@repo.hca.bsc.es:rferrer/llvm-epi-0.8.git" did not exist on "51ec74550923ea27e43700df5110c002f48172da"
Newer
Older
case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
case X86ISD::FLD: return "X86ISD::FLD";
case X86ISD::FST: return "X86ISD::FST";
case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
case X86ISD::Wrapper: return "X86ISD::Wrapper";
case X86ISD::S2VEC: return "X86ISD::S2VEC";
case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
  }
}
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool X86TargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{
// X86 allows a sign-extended 32-bit immediate field.
return (V > -(1LL << 32) && V < (1LL << 32)-1);
}
/// isLegalAddressImmediate - Return true if the GlobalValue can be used as
/// the offset of the target addressing mode.
bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
// In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement
// field unless we are in small code model.
if (Subtarget->is64Bit() &&
getTargetMachine().getCodeModel() != CodeModel::Small)
    return false;
  return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false));
}
/// isLegalAddressScale - Return true if the integer value can be used as the
/// scale of the target addressing mode for load / store of the given type.
bool X86TargetLowering::isLegalAddressScale(int64_t S, const Type *Ty) const {
switch (S) {
default:
return false;
case 2: case 4: case 8:
return true;
// FIXME: These require both scale + index last and thus more expensive.
// How to tell LSR to try for 2, 4, 8 first?
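  // Scales of 3, 5, and 9 are expressible because the index register can also
  // serve as the base (e.g. base + index*2 yields index*3 when base == index).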
case 3: case 5: case 9:
return true;
}
}
/// isLegalAddressScaleAndImm - Return true if S works for isLegalAddressScale
/// and V works for isLegalAddressImmediate _and_ both can be applied
/// simultaneously to the same instruction.
bool X86TargetLowering::isLegalAddressScaleAndImm(int64_t S, int64_t V,
const Type* Ty) const {
return isLegalAddressScale(S, Ty) && isLegalAddressImmediate(V, Ty);
}
/// isLegalAddressScaleAndImm - Return true if S works for isLegalAddressScale
/// and GV works for isLegalAddressImmediate _and_ both can be applied
/// simultaneously to the same instruction.
bool X86TargetLowering::isLegalAddressScaleAndImm(int64_t S, GlobalValue *GV,
const Type* Ty) const {
return isLegalAddressScale(S, Ty) && isLegalAddressImmediate(GV);
}
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(VT) == 64) return false;
return (Mask.Val->getNumOperands() <= 4 ||
isSplatMask(Mask.Val) ||
isPSHUFHW_PSHUFLWMask(Mask.Val) ||
X86::isUNPCKLMask(Mask.Val) ||
X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
X86::isUNPCKHMask(Mask.Val));
}
bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
MVT::ValueType EVT,
SelectionDAG &DAG) const {
unsigned NumElts = BVOps.size();
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
if (NumElts == 2) return true;
if (NumElts == 4) {
return (isMOVLMask(&BVOps[0], 4) ||
isCommutedMOVL(&BVOps[0], 4, true) ||
isSHUFPMask(&BVOps[0], 4) ||
isCommutedSHUFP(&BVOps[0], 4));
}
return false;
}
//===----------------------------------------------------------------------===//
// X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *BB) {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64: {
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
// true/false values to select between, and a branch opcode to use.
const BasicBlock *LLVM_BB = BB->getBasicBlock();
ilist<MachineBasicBlock>::iterator It = BB;
++It;
// thisMBB:
// ...
// TrueVal = ...
// cmpTY ccX, r1, r2
// bCC copy1MBB
// fallthrough --> copy0MBB
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
unsigned Opc =
X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
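    // Operand 3 of the CMOV pseudo holds the X86 condition code; pick the
    // matching conditional-branch opcode for the diamond.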
BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
// Update machine-CFG edges by first adding all successors of the current
// block to the new block which will contain the Phi node for the select.
for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
e = BB->succ_end(); i != e; ++i)
sinkMBB->addSuccessor(*i);
// Next, remove all successors of the current block, and add the true
// and fallthrough blocks as its successors.
while(!BB->succ_empty())
BB->removeSuccessor(BB->succ_begin());
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
BB = copy0MBB;
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
delete MI; // The pseudo instruction is gone now.
return BB;
}
case X86::FP_TO_INT16_IN_MEM:
case X86::FP_TO_INT32_IN_MEM:
case X86::FP_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
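    // The 2-byte, 2-byte-aligned stack slot is used to save and modify the
    // x87 control word around the store.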
addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned OldCW =
F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);
// Set the high part to be round to zero...
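    // 0xC7F sets the rounding-control field (bits 11:10) to 11b, i.e. round
    // toward zero, while keeping all x87 exceptions masked.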
addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
// Reload the modified control word now...
addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of control word to original value
addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
unsigned Opc;
switch (MI->getOpcode()) {
default: assert(0 && "illegal opcode!");
case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
}
X86AddressMode AM;
MachineOperand &Op = MI->getOperand(0);
if (Op.isRegister()) {
AM.BaseType = X86AddressMode::RegBase;
AM.Base.Reg = Op.getReg();
} else {
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = Op.getFrameIndex();
}
Op = MI->getOperand(1);
if (Op.isImmediate())
AM.Scale = Op.getImm();
Op = MI->getOperand(2);
if (Op.isImmediate())
AM.IndexReg = Op.getImm();
Op = MI->getOperand(3);
if (Op.isGlobalAddress()) {
AM.GV = Op.getGlobal();
} else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
.addReg(MI->getOperand(4).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
delete MI; // The pseudo instruction is gone now.
return BB;
}
}
}
//===----------------------------------------------------------------------===//
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
Opc == ISD::INTRINSIC_W_CHAIN ||
Opc == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
case X86ISD::SETCC:
KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}
/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
MVT::ValueType VT = N->getValueType(0);
SDOperand PermMask = N->getOperand(2);
unsigned NumElems = PermMask.getNumOperands();
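  // Mask entries in [0, NumElems) select from the first operand; entries in
  // [NumElems, 2*NumElems) select from the second.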
SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
i %= NumElems;
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return (i == 0)
? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
} else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF)
return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
}
return SDOperand();
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
unsigned Opc = N->getOpcode();
if (Opc == X86ISD::Wrapper) {
if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
return true;
}
} else if (Opc == ISD::ADD) {
SDOperand N1 = N->getOperand(0);
SDOperand N2 = N->getOperand(1);
if (isGAPlusOffset(N1.Val, GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
if (V) {
Offset += V->getSignExtended();
return true;
}
} else if (isGAPlusOffset(N2.Val, GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
if (V) {
Offset += V->getSignExtended();
return true;
}
}
}
return false;
}
/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
MachineFrameInfo *MFI) {
if (N->getOperand(0).Val != Base->getOperand(0).Val)
return false;
SDOperand Loc = N->getOperand(1);
SDOperand BaseLoc = Base->getOperand(1);
if (Loc.getOpcode() == ISD::FrameIndex) {
if (BaseLoc.getOpcode() != ISD::FrameIndex)
return false;
int FI = dyn_cast<FrameIndexSDNode>(Loc)->getIndex();
int BFI = dyn_cast<FrameIndexSDNode>(BaseLoc)->getIndex();
int FS = MFI->getObjectSize(FI);
int BFS = MFI->getObjectSize(BFI);
if (FS != BFS || FS != Size) return false;
return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
} else {
GlobalValue *GV1 = NULL;
GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Size);
}
return false;
}
static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
const X86Subtarget *Subtarget) {
GlobalValue *GV;
int64_t Offset;
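  // A global's alignment is known from its declaration; stack objects get it
  // from the frame info, or from the target stack alignment for fixed
  // (negative-index) objects.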
if (isGAPlusOffset(Base, GV, Offset))
return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
else {
assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
int BFI = dyn_cast<FrameIndexSDNode>(Base)->getIndex();
if (BFI < 0)
// Fixed objects do not specify alignment, however the offsets are known.
return ((Subtarget->getStackAlignment() % 16) == 0 &&
(MFI->getObjectOffset(BFI) % 16) == 0);
else
return MFI->getObjectAlignment(BFI) >= 16;
}
return false;
}
/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT::ValueType VT = N->getValueType(0);
MVT::ValueType EVT = MVT::getVectorBaseType(VT);
SDOperand PermMask = N->getOperand(2);
int NumElems = (int)PermMask.getNumOperands();
SDNode *Base = NULL;
for (int i = 0; i < NumElems; ++i) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF) {
if (!Base) return SDOperand();
} else {
SDOperand Arg =
getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
return SDOperand();
if (!Base)
Base = Arg.Val;
else if (!isConsecutiveLoad(Arg.Val, Base,
i, MVT::getSizeInBits(EVT)/8,MFI))
return SDOperand();
}
}
bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
if (isAlign16) {
LoadSDNode *LD = cast<LoadSDNode>(Base);
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
LD->getSrcValueOffset());
} else {
// Just use movups, it's shorter.
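    // X86ISD::LOAD_UA performs the unaligned 128-bit load; the v4f32 result
    // is bit-converted back to the original vector type.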
SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
SmallVector<SDOperand, 3> Ops;
Ops.push_back(Base->getOperand(0));
Ops.push_back(Base->getOperand(1));
Ops.push_back(Base->getOperand(2));
return DAG.getNode(ISD::BIT_CONVERT, VT,
DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
}
}
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
SDOperand Cond = N->getOperand(0);
// If we have SSE[12] support, try to form min/max nodes.
if (Subtarget->hasSSE2() &&
(N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
if (Cond.getOpcode() == ISD::SETCC) {
// Get the LHS/RHS of the select.
SDOperand LHS = N->getOperand(1);
SDOperand RHS = N->getOperand(2);
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
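      // (select (setcc X, Y, cc), X, Y) maps to FMIN/FMAX when the compare
      // operands line up with the selected values in the right order.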
if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
switch (CC) {
default: break;
case ISD::SETOLE: // (X <= Y) ? X : Y -> min
case ISD::SETULE:
case ISD::SETLE:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
case ISD::SETLT:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOGT: // (X > Y) ? X : Y -> max
case ISD::SETUGT:
case ISD::SETGT:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
case ISD::SETGE:
Opcode = X86ISD::FMAX;
break;
}
} else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
switch (CC) {
default: break;
case ISD::SETOGT: // (X > Y) ? Y : X -> min
case ISD::SETUGT:
case ISD::SETGT:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
case ISD::SETGE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOLE: // (X <= Y) ? Y : X -> max
case ISD::SETULE:
case ISD::SETLE:
if (!UnsafeFPMath) break;
// FALL THROUGH.
case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
case ISD::SETLT:
Opcode = X86ISD::FMAX;
break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}
SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::VECTOR_SHUFFLE:
return PerformShuffleCombine(N, DAG, Subtarget);
case ISD::SELECT:
return PerformSELECTCombine(N, DAG, Subtarget);
}
return SDOperand();
}
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(char ConstraintLetter) const {
switch (ConstraintLetter) {
case 'A':
case 'r':
case 'R':
case 'l':
case 'q':
case 'Q':
case 'x':
case 'Y':
return C_RegisterClass;
default: return TargetLowering::getConstraintType(ConstraintLetter);
}
}
/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand X86TargetLowering::
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
switch (Constraint) {
default: break;
case 'I':
if (isa<ConstantSDNode>(Op)) {
      unsigned Value = cast<ConstantSDNode>(Op)->getValue();
      if (Value <= 31)    // 'I' accepts immediates in the range [0, 31].
        return Op;
      else
        return SDOperand(0,0);
} else {
return SDOperand(0,0);
}
break;
case 'i':
// Literal immediates are always ok.
if (isa<ConstantSDNode>(Op)) return Op;
// If we are in non-pic codegen mode, we allow the address of a global to
// be used with 'i'.
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
return SDOperand(0, 0);
if (GA->getOpcode() != ISD::TargetGlobalAddress)
Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
GA->getOffset());
return Op;
}
// Otherwise, not valid for this mode.
return SDOperand(0, 0);
}
return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
// FIXME: not handling MMX registers yet ('y' constraint).
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
case 'A': // EAX/EDX
if (VT == MVT::i32 || VT == MVT::i64)
return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
break;
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
if (VT == MVT::i64 && Subtarget->is64Bit())
return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
X86::RSI, X86::RDI, X86::RBP, X86::RSP,
X86::R8, X86::R9, X86::R10, X86::R11,
X86::R12, X86::R13, X86::R14, X86::R15, 0);
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
X86::SI, X86::DI, X86::BP, X86::SP, 0);
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
X86::ESI, X86::EDI, X86::EBP, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
X86::SI, X86::DI, X86::BP, 0);
else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
case 'x': // SSE_REGS if SSE1 allowed
if (Subtarget->hasSSE1())
return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
0);
return std::vector<unsigned>();
case 'Y': // SSE_REGS if SSE2 allowed
if (Subtarget->hasSSE2())
return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
0);
return std::vector<unsigned>();
}
}
  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// Not found as a standard register?
if (Res.second == 0) {
// GCC calls "st(0)" just plain "st".
if (StringsEqualNoCase("{st}", Constraint)) {
Res.first = X86::ST0;
Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
// type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
// turn into {ax},{dx}.
if (Res.second->hasType(VT))
return Res; // Correct type already, nothing to do.
// All of the single-register GCC register classes map their values onto
// 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
// really want an 8-bit or 32-bit register, map to the appropriate register
// class and return the appropriate register.
if (Res.second != X86::GR16RegisterClass)
return Res;
if (VT == MVT::i8) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::AL; break;
case X86::DX: DestReg = X86::DL; break;
case X86::CX: DestReg = X86::CL; break;
case X86::BX: DestReg = X86::BL; break;
}
if (DestReg) {
Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
}
} else if (VT == MVT::i32) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::EAX; break;
case X86::DX: DestReg = X86::EDX; break;
case X86::CX: DestReg = X86::ECX; break;
case X86::BX: DestReg = X86::EBX; break;
case X86::SI: DestReg = X86::ESI; break;
case X86::DI: DestReg = X86::EDI; break;
case X86::BP: DestReg = X86::EBP; break;
case X86::SP: DestReg = X86::ESP; break;
}
if (DestReg) {
Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
}
} else if (VT == MVT::i64) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::RAX; break;
case X86::DX: DestReg = X86::RDX; break;
case X86::CX: DestReg = X86::RCX; break;
case X86::BX: DestReg = X86::RBX; break;
case X86::SI: DestReg = X86::RSI; break;
case X86::DI: DestReg = X86::RDI; break;
case X86::BP: DestReg = X86::RBP; break;
case X86::SP: DestReg = X86::RSP; break;
}
if (DestReg) {
Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}