      
      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      
      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
      
      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();
  // align stack specially for tail calls
  if (CC==CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize,DAG);
  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
  // Tail call calling convention (CallingConv::Fast) does not support varargs.
  assert( !(isVarArg && CC == CallingConv::Fast) && 
         "CallingConv::Fast does not support varargs.");

  if (isStdCall && !isVarArg && 
      (CC==CallingConv::Fast && PerformTailCallOpt || CC!=CallingConv::Fast)) {
    BytesToPopOnReturn  = StackSize;    // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0; // Callee pops nothing.
    
    // If this is an sret function, the return should pop the hidden pointer.
    if (NumArgs &&
        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
         ISD::ParamFlags::StructReturn))
      BytesToPopOnReturn = 4;
    
    BytesCallerReserves = StackSize;
  }

  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain     = Op.getOperand(0);
  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee    = Op.getOperand(4);
  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if(CC==CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
  
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC==CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
    
    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }
    
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  // If the first argument is an sret pointer, remember it.
  bool isSRet = NumOps &&
    (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
     ISD::ParamFlags::StructReturn);
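  // (Informal note: call operands are laid out in pairs starting at operand 5
  // - the value of argument i is at operand 5+2*i and its flags at 6+2*i - so
  // operand 6 holds the flags of the first argument.)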
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  
  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
 
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall || 
      (CC == CallingConv::Fast && PerformTailCallOpt)) {
    if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
    else
      NumBytesForCalleeToPush = NumBytes;
    assert(!(isVarArg && CC==CallingConv::Fast) &&
            "CallingConv::Fast does not support varargs.");
  } else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }

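  // Note (informal): getCALLSEQ_END below is given the chain, the number of
  // bytes the caller pushed for this call (NumBytes), the number of bytes the
  // callee pops on return (NumBytesForCalleeToPush), and the glue value; for
  // an sret call the callee pops only the 4-byte hidden pointer, hence the
  // "? 4 : 0" above.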
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy()),
                             DAG.getConstant(NumBytesForCalleeToPush,
                                             getPointerTy()),
                             InFlag);
  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

//===----------------------------------------------------------------------===//
//                   FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// and requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
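//
// For illustration only: a fastcall callee with three i32 stack arguments pops
// 12 bytes, which already has the form 8n+4; with two i32 stack arguments the
// 8 bytes are padded up to 12 by the alignment code below, preserving the
// invariant.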
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);
  
  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();
    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }
      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
      
      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      
      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
      
      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();
  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the instruction takes 8n+4 bytes to make sure the start of the
    // arguments and the arguments after the retaddr has been pushed are
    // aligned.
    if ((StackSize & 7) == 0)
      StackSize += 4;
  }

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  BytesToPopOnReturn = StackSize;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags    = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                           ISD::ParamFlags::ByValAlignOffs);

    unsigned  Size = (Flags & ISD::ParamFlags::ByValSize) >>
        ISD::ParamFlags::ByValSizeOffs;
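  // (Informal note: as decoded above, the ByValAlign bits hold the alignment
  // as a log2 value - hence the 1 << shift - while the ByValSize bits hold the
  // byte size directly.)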

    SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
    SDOperand  SizeNode = DAG.getConstant(Size, MVT::i32);
    SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
    return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode,
                         AlwaysInline);
  } else {
    return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
  }
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain     = Op.getOperand(0);
  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee    = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);
  
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the instruction takes 8n+4 bytes to make sure the start of the
    // arguments and the arguments after the retaddr has been pushed are
    // aligned.
    if ((NumBytes & 7) == 0)
      NumBytes += 4;
  }

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  
  SDOperand StackPtr;
  
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
    
    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: assert(0 && "Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
        break;
    }
    
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  assert(!isTailCall && "no tail call here");
  Chain = DAG.getNode(X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
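  // (Informal note: the second NumBytes constant is the number of bytes the
  // callee pops on return; fastcall callees clean up all of their stack
  // arguments, so it equals the full NumBytes pushed above.)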
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like stdcall, the callee cleans up the arguments, except that ECX is
//  reserved for storing the address of the tail-called function. Only 2
//  registers are free for argument passing (inreg). Tail call optimization is
//  performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//                * elf/pic is disabled OR
//                * elf/pic enabled + callee is in module + callee has
//                  visibility protected or hidden
//  To keep the stack aligned according to the platform ABI the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - Darwin's
//  dyld for example.)
//  If a tail-called function (the callee) has more arguments than the caller,
//  the caller needs to make sure that there is room to move the RETADDR to.
//  This is achieved by reserving an area the size of the argument delta right
//  after the original RETADDR, but before the saved frame pointer or the
//  spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3,
//  arg4).
//  Stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR 
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Round up the stack size so that, together
/// with the pushed return address, it is a multiple of the stack alignment,
/// e.g. 16n + 12 bytes for a 16-byte alignment requirement.
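///
/// Illustrative example (assuming a 16-byte stack alignment and a 4-byte
/// return-address slot): a StackSize of 20 has remainder 4 <= 12, so it is
/// rounded up to 28 = 16*1 + 12; a StackSize of 30 has remainder 14 > 12, so
/// it becomes (30 & ~15) + 16 + 12 = 44 = 16*2 + 12.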
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1; 
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
      // The remainder is at most (StackAlignment - SlotSize), e.g. 12 for a
      // 16-byte alignment; just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the lower bits, then add the stack alignment once plus
      // (StackAlignment - SlotSize) bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment + 
        (StackAlignment-SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A function is eligible if
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                      SDOperand Ret,
                                                      SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 && 
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On elf/pic %ebx needs to be livein.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT())
        return true;

      // Can only do local tail calls with PIC.
      GlobalValue * GV = 0;
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G != 0 &&
         (GV = G->getGlobal()) &&
         (GV->hasHiddenVisibility() || GV->hasProtectedVisibility()))
        return true;
    }
  }

  return false;
}

SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op, 
                                                     SelectionDAG &DAG,
                                                     unsigned CC) {
  SDOperand Chain     = Op.getOperand(0);
  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee    = Op.getOperand(4);
  bool is64Bit        = Subtarget->is64Bit();

  assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls.");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (is64Bit)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
  
  
  // Lower arguments at fp - stackoffset + fpdiff.
  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytesToBePushed = 
    GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG);
    
  unsigned NumBytesCallerPushed = 
    MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
  int FPDiff = NumBytesCallerPushed - NumBytesToBePushed;
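  // (Informal example: if the caller's own incoming arguments occupy 8 bytes
  // on the stack but the callee needs 24 bytes, FPDiff is 8 - 24 = -16, i.e.
  // the tail-called frame extends 16 bytes further and the return address has
  // to be moved down by that amount.)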

  // Set the delta of movement of the returnaddr stackslot.
  // But only set if delta is greater than previous delta.
  if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
    MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);

  Chain = DAG.
   getCALLSEQ_START(Chain, DAG.getConstant(NumBytesToBePushed, getPointerTy()));

  // Adjust the Return address stack slot.
  SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
  if (FPDiff) {
    MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32;
    RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
    // Load the "old" Return address.
    RetAddrFrIdx = DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
    // Calculate the new stack slot for the return address.
    int SlotSize = is64Bit ? 8 : 4;
    int NewReturnAddrFI = 
      MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
    NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
    Chain = SDOperand(RetAddrFrIdx.Val, 1);
  }

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SmallVector<SDOperand, 8> MemOpChains2;
  SDOperand FramePtr, StackPtr;
  SDOperand PtrOff;
  SDOperand FIN;
  int FI = 0;

  // Walk the register/memloc assignments, inserting copies/loads.  Lower the
  // arguments first to the stack slots they would occupy in a normal
  // (non-tail) function call.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
    
    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }
    
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  InFlag = SDOperand();
  // Copy arguments from their temporary stack slots to the stack slots of the
  // tail-called function. This is needed because lowering the arguments
  // directly to their final stack slots could overwrite arguments that have
  // not been read yet.
  // TODO: To make this more efficient (sometimes saving a store/load) we could
  // analyse the arguments and emit this store/load/store sequence only for
  // arguments which would be overwritten otherwise.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (!VA.isRegLoc()) {
      SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
      unsigned Flags    = cast<ConstantSDNode>(FlagsOp)->getValue();
      
      // Get source stack slot. 
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset()+FPDiff;
      uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
      FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
      FIN = DAG.getFrameIndex(FI, MVT::i32);
      if (Flags & ISD::ParamFlags::ByVal) {
        // Copy relative to framepointer.
        unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                               ISD::ParamFlags::ByValAlignOffs);

        unsigned  Size = (Flags & ISD::ParamFlags::ByValSize) >>
          ISD::ParamFlags::ByValSizeOffs;
 
        SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
        SDOperand  SizeNode = DAG.getConstant(Size, MVT::i32);
        SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1);

        MemOpChains2.push_back(DAG.getMemcpy(Chain, FIN, PtrOff, SizeNode, 
                                             AlignNode,AlwaysInline));
      } else {
        SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL,0);
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
      }
    }
  }

  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains2[0], MemOpChains2.size());

  // Store the return address to the appropriate stack slot.
  if (FPDiff)
    Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  // This does not work with tail calls, since EBX is not restored correctly
  // by the tail caller. TODO: at least true for x86; verify for x86-64.

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  else {
    assert(Callee.getOpcode() == ISD::LOAD && 
           "Function destination must be loaded into virtual register");
    unsigned Opc = is64Bit ? X86::R9 : X86::ECX;

    Chain = DAG.getCopyToReg(Chain, 
                             DAG.getRegister(Opc, getPointerTy()) , 
                             Callee,InFlag);
    Callee = DAG.getRegister(Opc, getPointerTy());
    // Add register as live out.
    DAG.getMachineFunction().addLiveOut(Opc);
  }
   
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;

  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a chain & a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  assert(InFlag.Val && 
         "Flag must be set. Depend on flag being set in LowerRET");
  Chain = DAG.getNode(X86ISD::TAILCALL,
                      Op.Val->getVTList(), &Ops[0], Ops.size());
    
  return SDOperand(Chain.Val, Op.ResNo);
}

//===----------------------------------------------------------------------===//
//                 X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC= MF.getFunction()->getCallingConv();
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8,  X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  
  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
  
  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();
    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;       // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
      
      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      
      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
      
      // Handle MMX values passed in GPRs.
      if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
      
      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }
  
  unsigned StackSize = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
  
  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    assert(CC!=CallingConv::Fast 
           && "Var arg not supported with calling convention fastcc");
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
    
    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
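    // (Informal note: the save area is 6 * 8 = 48 bytes for the six GP
    // argument registers plus 8 * 16 = 128 bytes for the eight XMM registers,
    // 176 bytes total, with 16-byte alignment.)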

    // Store the integer parameter registers.
    SmallVector<SDOperand, 8> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                              DAG.getConstant(VarArgsGPOffset, getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  // Tail call convention (fastcc) needs callee pop.
  if (CC == CallingConv::Fast && PerformTailCallOpt) {
    BytesToPopOnReturn = StackSize;  // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0;  // Callee pops nothing.
    BytesCallerReserves = StackSize;
  }
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain     = Op.getOperand(0);
  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee    = Op.getOperand(4);
  
  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC==CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
    
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes,DAG);

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SDOperand StackPtr;
  
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
    
    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }
    
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)