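        // If the load's chain was a TokenFactor, rebuild it so the new chain
        // value is included and no chained operation is dropped.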
if (TokenFactorIndex != -1) {
Ops.push_back(NewChain);
NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
Ops.size());
}
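        // Store the new load's value to the original store's address.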
return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
St->getSrcValue(), St->getSrcValueOffset(),
St->isVolatile(), St->getAlignment());
}
// Otherwise, lower to two 32-bit copies.
SDValue LoAddr = Ld->getBasePtr();
SDValue HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
DAG.getConstant(4, MVT::i32));
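      // Load the low and high 32-bit halves; the high half uses the source
      // value offset by 4 and a correspondingly reduced alignment.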
SDValue LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
Ld->getSrcValue(), Ld->getSrcValueOffset(),
Ld->isVolatile(), Ld->getAlignment());
SDValue HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
Ld->isVolatile(),
MinAlign(Ld->getAlignment(), 4));
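      // If the load's chain was a TokenFactor, splice both new loads into it.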
if (TokenFactorIndex != -1) {
Ops.push_back(LoLd);
Ops.push_back(HiLd);
NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
Ops.size());
}
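      // Store the two halves to the original address and to the address plus 4.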
LoAddr = St->getBasePtr();
HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
DAG.getConstant(4, MVT::i32));
SDValue LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
St->getSrcValue(), St->getSrcValueOffset(),
St->isVolatile(), St->getAlignment());
SDValue HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
St->getSrcValue(),
St->getSrcValueOffset() + 4,
St->isVolatile(),
MinAlign(St->getAlignment(), 4));
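      // Tie the two stores together so both are kept in the DAG.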
return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
}
  return SDValue();
}
/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
// F[X]OR(0.0, x) -> x
// F[X]OR(x, 0.0) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
if (C->getValueAPF().isPosZero())
return N->getOperand(0);
  return SDValue();
}
/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
// FAND(0.0, x) -> 0.0
// FAND(x, 0.0) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
return N->getOperand(0);
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
  return SDValue();
}
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
case ISD::BUILD_VECTOR:
return PerformBuildVectorCombine(N, DAG, Subtarget, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
case X86ISD::FOR: return PerformFORCombine(N, DAG);
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
}
  return SDValue();
}
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
if (Constraint.size() == 1) {
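    // Each of these single-letter constraints names a register class; the
    // concrete class is picked in getRegClassForInlineAsmConstraint and
    // getRegForInlineAsmConstraint below.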
switch (Constraint[0]) {
case 'A':
case 'r':
case 'R':
case 'l':
case 'q':
case 'Q':
case 'x':
case 'Y':
return C_RegisterClass;
default:
break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(MVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
return "Y";
if (Subtarget->hasSSE1())
return "x";
}
  return TargetLowering::LowerXConstraint(ConstraintVT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char Constraint,
                                                     bool hasMemory,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':   // Constant in the range [0, 31] (32-bit shift counts).
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
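  // 'J' accepts a constant in the range [0, 63] (64-bit shift counts).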
case 'J':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 63) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
}
return;
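  // 'N' accepts an unsigned 8-bit constant (0 to 255), as used by the in/out
  // instructions.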
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 255) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
Result = DAG.getTargetConstant(CST->getZExtValue(), Op.getValueType());
break;
}
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
int64_t Offset = 0;
// Match either (GA) or (GA+C)
if (GA) {
Offset = GA->getOffset();
} else if (Op.getOpcode() == ISD::ADD) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
if (C && GA) {
Offset = GA->getOffset()+C->getZExtValue();
} else {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
if (C && GA)
Offset = GA->getOffset()+C->getZExtValue();
else
C = 0, GA = 0;
}
}
if (GA) {
if (hasMemory)
Op = LowerGlobalAddress(GA->getGlobal(), DAG);
else
Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
Offset);
Result = Op;
break;
}
// Otherwise, not valid for this mode.
return;
  }
  }
if (Result.getNode()) {
Ops.push_back(Result);
return;
}
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
Ops, DAG);
}
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
case 'A': // EAX/EDX
if (VT == MVT::i32 || VT == MVT::i64)
return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
else if (VT == MVT::i64)
return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
default: break;
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
case 'l': // INDEX_REGS
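      // Pick the general-purpose register class matching the operand size.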
if (VT == MVT::i64 && Subtarget->is64Bit())
return std::make_pair(0U, X86::GR64RegisterClass);
if (VT == MVT::i32)
return std::make_pair(0U, X86::GR32RegisterClass);
else if (VT == MVT::i16)
return std::make_pair(0U, X86::GR16RegisterClass);
else if (VT == MVT::i8)
return std::make_pair(0U, X86::GR8RegisterClass);
break;
case 'f': // FP Stack registers.
// If SSE is enabled for this VT, use f80 to ensure the isel moves the
// value to the correct fpstack register class.
if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, X86::RFP32RegisterClass);
if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, X86::RFP64RegisterClass);
return std::make_pair(0U, X86::RFP80RegisterClass);
case 'y': // MMX_REGS if MMX allowed.
if (!Subtarget->hasMMX()) break;
return std::make_pair(0U, X86::VR64RegisterClass);
break;
case 'Y': // SSE_REGS if SSE2 allowed
if (!Subtarget->hasSSE2()) break;
// FALL THROUGH.
case 'x': // SSE_REGS if SSE1 allowed
if (!Subtarget->hasSSE1()) break;
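      // Choose a scalar or vector SSE register class based on the operand type.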
switch (VT.getSimpleVT()) {
default: break;
// Scalar SSE types.
case MVT::f32:
case MVT::i32:
return std::make_pair(0U, X86::FR32RegisterClass);
case MVT::f64:
case MVT::i64:
return std::make_pair(0U, X86::FR64RegisterClass);
// Vector types.
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// Not found as a standard register?
if (Res.second == 0) {
// GCC calls "st(0)" just plain "st".
if (StringsEqualNoCase("{st}", Constraint)) {
Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
// type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
// turn into {ax},{dx}.
if (Res.second->hasType(VT))
return Res; // Correct type already, nothing to do.
// All of the single-register GCC register classes map their values onto
// 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
// really want an 8-bit or 32-bit register, map to the appropriate register
// class and return the appropriate register.
if (Res.second == X86::GR16RegisterClass) {
if (VT == MVT::i8) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::AL; break;
case X86::DX: DestReg = X86::DL; break;
case X86::CX: DestReg = X86::CL; break;
case X86::BX: DestReg = X86::BL; break;
}
if (DestReg) {
Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
}
} else if (VT == MVT::i32) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::EAX; break;
case X86::DX: DestReg = X86::EDX; break;
case X86::CX: DestReg = X86::ECX; break;
case X86::BX: DestReg = X86::EBX; break;
case X86::SI: DestReg = X86::ESI; break;
case X86::DI: DestReg = X86::EDI; break;
case X86::BP: DestReg = X86::EBP; break;
case X86::SP: DestReg = X86::ESP; break;
}
if (DestReg) {
Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
}
} else if (VT == MVT::i64) {
unsigned DestReg = 0;
switch (Res.first) {
default: break;
case X86::AX: DestReg = X86::RAX; break;
case X86::DX: DestReg = X86::RDX; break;
case X86::CX: DestReg = X86::RCX; break;
case X86::BX: DestReg = X86::RBX; break;
case X86::SI: DestReg = X86::RSI; break;
case X86::DI: DestReg = X86::RDI; break;
case X86::BP: DestReg = X86::RBP; break;
case X86::SP: DestReg = X86::RSP; break;
}
if (DestReg) {
Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
}
    }
  } else if (Res.second == X86::FR32RegisterClass ||
Res.second == X86::FR64RegisterClass ||
Res.second == X86::VR128RegisterClass) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
// find, ignoring the required type.
if (VT == MVT::f32)
Res.second = X86::FR32RegisterClass;
else if (VT == MVT::f64)
Res.second = X86::FR64RegisterClass;
else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}