  case X86::ATOMAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 
                                               X86::OR32ri, X86::MOV32rm, 
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMXOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
                                               X86::XOR32ri, X86::MOV32rm, 
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMNAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass, true);
  case X86::ATOMMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
  case X86::ATOMMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
  case X86::ATOMUMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
  case X86::ATOMUMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);
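  // The bitwise and min/max cases above are expanded by the custom inserters
  // into a compare-exchange loop.  As a rough sketch (illustrative
  // pseudo-assembly, not the exact machine instructions the helpers build),
  // ATOMAND32 becomes something like:
  //
  //       mov   eax, [ptr]          ; load the current value
  //   retry:
  //       mov   tmp, eax
  //       and   tmp, src            ; regOpc/immOpc (NAND additionally NOTs tmp)
  //       lock  cmpxchg [ptr], tmp  ; LCMPXCHG32: stores only if [ptr] == eax
  //       jne   retry               ; lost a race; eax was reloaded, try again
  //
  // The ATOMMIN/MAX forms instead use the CMOVcc opcode they pass to pick the
  // value to store.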

  case X86::ATOMAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 
                                               X86::OR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMXOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
                                               X86::XOR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMNAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass, true);
  case X86::ATOMMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
  case X86::ATOMMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
  case X86::ATOMUMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
  case X86::ATOMUMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);

  case X86::ATOMAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 
                                               X86::OR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMXOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
                                               X86::XOR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMNAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass, true);
  // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
  // This group is for 64-bit host.
  case X86::ATOMAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm, 
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 
                                               X86::OR64ri32, X86::MOV64rm, 
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMXOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
                                               X86::XOR64ri32, X86::MOV64rm, 
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMNAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass, true);
  case X86::ATOMMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
  case X86::ATOMMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
  case X86::ATOMUMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
  case X86::ATOMUMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);

  // This group does 64-bit operations on a 32-bit host.
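  // Rough sketch of the expansion (illustrative, not the exact MIR): the
  // current 64-bit value is kept in EDX:EAX, the replacement value is built in
  // ECX:EBX with the low/high opcode pair passed below (e.g. ADD32rr for the
  // low half and ADC32rr to propagate the carry into the high half), and a
  // LOCK'ed CMPXCHG8B loop retries until the exchange succeeds.  The trailing
  // bool requests the extra inversion needed for ATOMNAND6432.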
  case X86::ATOMAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               false);
  case X86::ATOMOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::OR32rr, X86::OR32rr,
                                               X86::OR32ri, X86::OR32ri,
                                               false);
  case X86::ATOMXOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::XOR32rr, X86::XOR32rr,
                                               X86::XOR32ri, X86::XOR32ri,
                                               false);
  case X86::ATOMNAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               true);
  case X86::ATOMADD6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::ADD32rr, X86::ADC32rr,
                                               X86::ADD32ri, X86::ADC32ri,
                                               false);
  case X86::ATOMSUB6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::SUB32rr, X86::SBB32rr,
                                               X86::SUB32ri, X86::SBB32ri,
                                               false);
  case X86::ATOMSWAP6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                               X86::MOV32rr, X86::MOV32rr,
                                               X86::MOV32ri, X86::MOV32ri,
                                               false);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                        const APInt &Mask,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

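// For example, because X86ISD::SETCC is reported here to produce only 0 or 1,
// MaskedValueIsZero can prove that masking a SETCC result with a mask that
// clears bit 0 yields zero, letting the DAG combiner delete such ANDs without
// ever inspecting the flag computation.
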
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       GlobalValue* &GA, int64_t &Offset) const{
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}

static bool isBaseAlignmentOfN(unsigned N, SDNode *Base,
                               const TargetLowering &TLI) {
  GlobalValue *GV;
  int64_t Offset = 0;
  if (TLI.isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= N && (Offset % N) == 0);
  // DAG combine handles the stack object case.
  return false;
}

static bool EltsFromConsecutiveLoads(SDNode *N, SDValue PermMask,
                                     unsigned NumElems, MVT EVT,
                                     SDNode *&Base,
                                     SelectionDAG &DAG, MachineFrameInfo *MFI,
                                     const TargetLowering &TLI) {
  Base = NULL;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base)
        return false;
      continue;
    }

    SDValue Elt = DAG.getShuffleScalarElt(N, i);
    if (!Elt.getNode() ||
        (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
      return false;
    if (!Base) {
      Base = Elt.getNode();
      if (Base->getOpcode() == ISD::UNDEF)
        return false;
      continue;
    }
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;

    if (!TLI.isConsecutiveLoad(Elt.getNode(), Base,
                               EVT.getSizeInBits()/8, i, MFI))
      return false;
  }
  return true;
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
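/// For example (illustrative only), four f32 loads from A, A+4, A+8 and A+12
/// feeding a <0, 1, 2, 3> shuffle can be replaced by a single v4f32 load of A;
/// the element type and count come from the shuffle's operands.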
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MVT VT = N->getValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDNode *Base = NULL;
  if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, Base,
                                DAG, MFI, TLI))
    return SDValue();
  LoadSDNode *LD = cast<LoadSDNode>(Base);
  if (isBaseAlignmentOfN(16, Base->getOperand(1).getNode(), TLI))
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                     LD->getSrcValueOffset(), LD->isVolatile(),
                     LD->getAlignment());
}

/// PerformBuildVectorCombine - build_vector 0,(load i64 / f64) -> movq / movsd.
static SDValue PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget,
                                         const TargetLowering &TLI) {
  // Ignore single operand BUILD_VECTOR.
  if (N->getNumOperands() == 1)
    return SDValue();

  MVT VT = N->getValueType(0);
  MVT EVT = VT.getVectorElementType();
  if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit())
    // We are looking for load i64 and zero extend. We want to transform
    // it before legalizer has a chance to expand it. Also look for i64
    // BUILD_PAIR bit casted to f64.
    return SDValue();
  // This must be an insertion into a zero vector.
  SDValue HighElt = N->getOperand(1);
  if (!isZeroNode(HighElt))
    return SDValue();

  // Value must be a load.
  SDNode *Base = N->getOperand(0).getNode();
  if (!isa<LoadSDNode>(Base)) {
    if (Base->getOpcode() != ISD::BIT_CONVERT)
      return SDValue();
    Base = Base->getOperand(0).getNode();
    if (!isa<LoadSDNode>(Base))
      return SDValue();
  }

  LoadSDNode *LD = cast<LoadSDNode>(Base);
  
  // Load must not be an extload.
  if (LD->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();
  SDVTList Tys = DAG.getVTList(VT, MVT::Other);
  SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
  SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, Tys, Ops, 2);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Base, 1), ResNode.getValue(1));
  return ResNode;
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
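/// The interesting case here is turning (select (setcc X, Y, olt), X, Y) on
/// f32/f64 into X86ISD::FMIN (and the gt forms into FMAX), which instruction
/// selection then matches to minss/minsd or maxss/maxsd.  The non-strict
/// le/ge variants are only formed under -enable-unsafe-fp-math, since the SSE
/// min/max instructions do not treat NaNs and signed zeros quite the way a
/// generic select does.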
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDValue Cond = N->getOperand(0);
  // If we have SSE[12] support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDValue LHS = N->getOperand(1);
      SDValue RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:   // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:   // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDValue();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
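  // Concretely, store (v1i64 (load p)), q is rewritten below into a plain i64
  // load/store pair on x86-64, or into two i32 load/store pairs (p / p+4 into
  // q / q+4) on 32-bit targets, so no MMX register is ever materialized.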
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->getValue().getValueType().isVector() &&
      St->getValue().getValueType().getSizeInBits() == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode *ChainVal = St->getChain().getNode();
    SDNode *LdVal = St->getValue().getNode();
    // Must be a store of a load.  We currently handle two cases:  the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
      if (Subtarget->is64Bit()) {
        SDValue NewLd = DAG.getLoad(MVT::i64, Ld->getChain(), 
                                      Ld->getBasePtr(), Ld->getSrcValue(), 
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDValue NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewChain);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0], 
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDValue LoAddr = Ld->getBasePtr();
      SDValue HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));
      SDValue LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDValue HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(), 
                                   MinAlign(Ld->getAlignment(), 4));

      SDValue NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0], 
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));
      SDValue LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                          St->getSrcValue(), St->getSrcValueOffset(),
                          St->isVolatile(), St->getAlignment());
      SDValue HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(),
                                    St->getSrcValueOffset() + 4,
                                    St->isVolatile(), 
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDValue();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
  case ISD::BUILD_VECTOR:
    return PerformBuildVectorCombine(N, DAG, Subtarget, *this);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
      return C_Register;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
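/// For example, an "=X" operand of floating-point type is given a "Y" (SSE2)
/// or "x" (SSE1) register constraint here when SSE is available, instead of
/// the x87 "f" constraint used otherwise.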
const char *X86TargetLowering::
LowerXConstraint(MVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }
  
  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
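/// For example, with the "I" constraint (a constant in [0, 31], typically a
/// shift count), asm("shll %1, %0" : "+r"(x) : "I"(3)) gets its 3 lowered to
/// a target constant here, while an out-of-range value is simply rejected.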
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     bool hasMemory,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getZExtValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;
    
    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getZExtValue();
      } else {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getZExtValue();
        else
          C = 0, GA = 0;
      }
    }
    
    if (GA) {
      if (hasMemory)
        Op = LowerGlobalAddress(GA->getGlobal(), Offset, DAG);
      else
        Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                        Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
                                                      Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);  
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'f':  // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT()) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }
    return Res;
  }
  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;