    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

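    // Combine the four single-element vectors pairwise. Each 2-bit field of
    // NonZeros selects how to form one half: 00 -> reuse the zero vector,
    // 01 / 10 -> merge the scalar with zero via a MOVL-style shuffle (the
    // nonzero element lands in lane 0 of the half; the final shuffle below
    // moves it into place), 11 -> interleave the two scalars with unpckl.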
    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                             getMOVLMask(NumElems, DAG));
          break;
        case 2:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getMOVLMask(NumElems, DAG));
          break;
        case 3:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getUnpacklMask(NumElems, DAG));
          break;
      }
    }

    // Take advantage of the fact that a GR32 to VR128 scalar_to_vector
    // (i.e. movd) clears the upper bits of the destination register.
    // FIXME: we can do the same for v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in post legalizer dag combiner with target specific hooks.
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];
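
    // Combine the two halves with a 4-wide shuffle: lanes 0-1 come from V[0]
    // and lanes 2-3 from V[1] (indices NumElems+0/1). A half whose NonZeros
    // field is 2 (binary 10) is emitted reversed, since its nonzero element
    // belongs in the odd slot.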
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
    std::vector<SDOperand> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i, EVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size());
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }

  if (Values.size() > 2) {
    // Expand into a number of unpckl*.
    // e.g. for v4f32
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  if (isUndefShuffle(Op.Val))
    return DAG.getNode(ISD::UNDEF, VT);

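  // Check the cheap single-instruction matches first: splats, movl, the
  // movs*dup / movhlps / movhps / movlps family, and masks that become one
  // of those after commuting the operands.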
  if (isSplatMask(PermMask.Val)) {
    if (NumElems <= 4) return Op;
    // Promote it to a v4i32 splat.
    return PromoteSplat(Op, DAG);
  }

  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;
  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;
  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
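
  // If V1 is a splat or undef but V2 is not, commute the operands so the
  // splat / undef ends up in V2. Commuted records this so the unpck checks
  // can be retried on the original order further down.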
  bool Commuted = false;
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;
  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If it matches, return a
    // new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val))
      return Op;

    // Handle a v8i16 shuffle with a pshufhw / pshuflw node pair.
    if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
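      // Lower in two steps: the first shuffle applies the caller's low-half
      // mask while keeping the high half in place (identity indices 4-7);
      // the second applies the high-half mask to that result.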
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
    }
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }
  }

  if (NumElems == 4) {
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    std::vector<std::pair<int, int> > Locs;
    Locs.resize(NumElems);
    std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts them into the right order.
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &Mask1[0], Mask1.size()));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &Mask2[0], Mask2.size()));
    }

    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    Locs.resize(NumElems);
    std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> *MaskPtr = &LoMask;
    unsigned MaskIdx = 0;
    unsigned LoIdx = 0;
    unsigned HiIdx = NumElems/2;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (i == NumElems/2) {
        MaskPtr = &HiMask;
        MaskIdx = 1;
        LoIdx = 0;
        HiIdx = NumElems/2;
      }
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
        Locs[i] = std::make_pair(MaskIdx, LoIdx);
        (*MaskPtr)[LoIdx] = Elt;
        LoIdx++;
      } else {
        Locs[i] = std::make_pair(MaskIdx, HiIdx);
        (*MaskPtr)[HiIdx] = Elt;
        HiIdx++;
      }
    }

    SDOperand LoShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &LoMask[0], LoMask.size()));
    SDOperand HiShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &HiMask[0], HiMask.size()));
    std::vector<SDOperand> MaskOps;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Locs[i].first == -1) {
        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
      } else {
        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
      }
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskOps[0], MaskOps.size()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    // Transform it so it matches pextrw, which produces a 32-bit result.
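    // MVT::i32 immediately follows MVT::i16 in the ValueType enumeration,
    // so VT+1 is the 32-bit integer type pextrw produces.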
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert  = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                    DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;
    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  } else if (MVT::getSizeInBits(VT) == 64) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note: if the lower 64 bits of the UNPCKHPD result are then stored to
    // a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
  // as its second argument.
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);
  if (MVT::getSizeInBits(BaseVT) == 16) {
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  } else if (MVT::getSizeInBits(BaseVT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
    if (Idx == 0) {
      // Use a movss.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      MaskVec.push_back(DAG.getConstant(4, BaseVT));
      for (unsigned i = 1; i <= 3; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size()));
    } else {
      // Use two pinsrw instructions to insert a 32-bit value.
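      // A 32-bit element spans two 16-bit pinsrw lanes, so double the index
      // to address the lane of its low half.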
      Idx <<= 1;
      if (MVT::isFloatingPoint(N1.getValueType())) {
        if (ISD::isNON_EXTLoad(N1.Val)) {
          LoadSDNode *LD = cast<LoadSDNode>(N1);
          N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(),
                           LD->getSrcValue(), LD->getSrcValueOffset());
        } else {
          N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
          N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
          N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
                           DAG.getConstant(0, getPointerTy()));
        }
      }
      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx, getPointerTy()));
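      // Move the upper 16 bits down and insert them into the adjacent lane.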
      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx+1, getPointerTy()));
      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
    }
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not in the GV offset field. The platform check is inside the
  // GVRequiresExtraLoad() call. The same applies to external symbols during
  // PIC codegen.
  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);

  return Result;
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
    assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
           "Not an i64 shift!");
    bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
    SDOperand ShOpLo = Op.getOperand(0);
    SDOperand ShOpHi = Op.getOperand(1);
    SDOperand ShAmt  = Op.getOperand(2);
    SDOperand Tmp1 = isSRA ?
      DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
      DAG.getConstant(0, MVT::i32);
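    // Tmp1 is the fill value for the case where the shift amount is >= 32:
    // replicated sign bits for an arithmetic shift right, zero otherwise.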

    SDOperand Tmp2, Tmp3;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
      Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
    } else {
      Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
      Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
    }

    const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
    SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                    DAG.getConstant(32, MVT::i8));
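    // Test bit 5 of the shift amount (i.e. amount >= 32). The CMOVs below
    // use the result to choose between the double-shift outputs and the
    // shifted-past-32 fallbacks.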
    SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
    SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
    SDOperand Hi, Lo;
    SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
    SmallVector<SDOperand, 4> Ops;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
      InFlag = Hi.getValue(1);

      Ops.clear();
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    } else {
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
      InFlag = Lo.getValue(1);

      Ops.clear();
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    }

    VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
    Ops.clear();
    Ops.push_back(Lo);
    Ops.push_back(Hi);
    return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
         Op.getOperand(0).getValueType() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");
  SDOperand Result;
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
                                 StackSlot, NULL, 0);

  // Build the FILD
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::f64);
  Tys.push_back(MVT::Other);
  if (X86ScalarSSE) Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
                       Tys, &Ops[0], Ops.size());
  if (X86ScalarSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");
  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  switch (Op.getValueType()) {
    default: assert(0 && "Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
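  // With SSE, the input lives in an XMM register, but the x87 FIST family
  // only operates on the FP stack. Spill the value to the stack slot and
  // reload it via X86ISD::FLD before building the FP_TO_INT*_IN_MEM node.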
  if (X86ScalarSSE) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
    Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Value);
  Ops.push_back(StackSlot);
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size());
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}

SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy =  MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
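  // fabs becomes an AND with a constant-pool mask that clears only the sign
  // bit: ~(1 << 63) for f64, ~(1 << 31) for f32, zero-padded to 128 bits.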
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(VT);
  Tys.push_back(MVT::Other);
  SmallVector<SDOperand, 3> Ops;
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}

SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy =  MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
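  // fneg becomes an XOR with a mask holding only the sign bit (1 << 63 for
  // f64, 1 << 31 for f32), which flips the sign of the low element.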
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(VT);
  Tys.push_back(MVT::Other);
  SmallVector<SDOperand, 3> Ops;
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
}

SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Op1.getValueType();
  const Type *SrcTy =  MVT::getTypeForValueType(SrcVT);

  // If second operand is smaller, extend it first.
  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
    SrcVT = VT;
  }

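  // copysign(Op0, Op1) = (Op0 & ~signmask) | (Op1 & signmask); both masks
  // come from the constant pool, and the sign bit may need to be moved when
  // the operand types differ.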
  // First get the sign bit of second operand.
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(1ULL << 63)));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(1U << 31)));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  SDVTList Tys = DAG.getVTList(SrcVT, MVT::Other);
  SmallVector<SDOperand, 3> Ops;
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask1 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getConstant(0, getPointerTy()));
  }

  // Clear first operand sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(~(1ULL << 63))));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(~(1U << 31))));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
    CV.push_back(ConstantFP::get(SrcTy, 0.0));
  }
  CS = ConstantStruct::get(CV);
  CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  Tys = DAG.getVTList(VT, MVT::Other);
  Ops.clear();
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask2 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}

SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
                                        SDOperand Chain) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  unsigned X86CC;
  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    SDOperand Ops1[] = { Chain, Op0, Op1 };
    Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1);
    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
    return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
  }

  assert(isFP && "Illegal integer SetCC!");

  SDOperand COps[] = { Chain, Op0, Op1 };
  Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1);

  switch (SetCCOpcode) {
  default: assert(false && "Illegal floating point SetCC!");
  case ISD::SETOEQ: {  // !PF & ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8), Cond };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
  }
  case ISD::SETUNE: {  // PF | !ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8), Cond };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
  }
  }
}

SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = DAG.getEntryNode();
  SDOperand Cond  = Op.getOperand(0);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons)?
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    bool IllegalFPCMov = !X86ScalarSSE &&
      MVT::isFloatingPoint(Op.getValueType()) &&
      !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
        !IllegalFPCMov) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }
  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }

  VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means: set the result (which is operand 1) to the RHS if
  // the condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond.getValue(1));
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond  = Op.getOperand(1);
  SDOperand Dest  = Op.getOperand(2);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons)?
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }
  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Cond, Op.getOperand(2), CC, Cond.getValue(1));
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit())
    return LowerX86_64CCCCallTo(Op, DAG);
  else
    switch (CallingConv) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      return LowerFastCCCallTo(Op, DAG);
    case CallingConv::C:
    case CallingConv::X86_StdCall:
      return LowerCCCCallTo(Op, DAG, true);
    case CallingConv::X86_FastCall:
      return LowerFastCCCallTo(Op, DAG, true);
    }
}

SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch(Op.getNumOperands()) {
    default:
      assert(0 && "Do not know how to return this many arguments!");
      abort();
    case 1:    // ret void.
      return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
                        DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
    case 3: {
      MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
      if (MVT::isVector(ArgVT) ||
          (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) {
        // Integer or FP vector result -> XMM0.
        if (DAG.getMachineFunction().liveout_empty())
          DAG.getMachineFunction().addLiveOut(X86::XMM0);
        Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
                                SDOperand());
      } else if (MVT::isInteger(ArgVT)) {
        // Integer result -> EAX / RAX.
        // The C calling convention guarantees the return value has been
        // promoted to at least MVT::i32. The X86-64 ABI doesn't require the
        // value to be promoted to MVT::i64. So we don't have to extend it to
        // 64 bits. Return the value in EAX, but mark RAX as liveout.
        unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
        if (DAG.getMachineFunction().liveout_empty())
          DAG.getMachineFunction().addLiveOut(Reg);
        Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX;
        Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1),