//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSE = Subtarget->hasSSE2();
X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
RegInfo = TM.getRegisterInfo();
// Set up the TargetLowering object.
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setShiftAmountType(MVT::i8);
setSetCCResultType(MVT::i8);
setSetCCResultContents(ZeroOrOneSetCCResult);
setSchedulingPreference(SchedulingForRegPressure);
setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
setStackPointerRegisterToSaveRestore(X86StackPtr);
if (Subtarget->isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
} else {
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(true);
}
// Set up the register classes.
addRegisterClass(MVT::i8, X86::GR8RegisterClass);
addRegisterClass(MVT::i16, X86::GR16RegisterClass);
addRegisterClass(MVT::i32, X86::GR32RegisterClass);
if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, X86::GR64RegisterClass);
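  // There is no native sign-extending load from i1; expand it.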
setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
} else {
if (X86ScalarSSE)
// If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
}
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
// SSE has no i16 to fp conversion, only i32
if (X86ScalarSSE) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
} else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
if (!Subtarget->is64Bit()) {
// Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
}
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT  , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT  , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT  , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT  , MVT::i32  , Custom);
  }
// Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion.
setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
} else {
if (X86ScalarSSE && !Subtarget->hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
else
// With SSE3 we can use fisttpll to convert to a signed i64.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
if (!X86ScalarSSE) {
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
}
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
setOperationAction(ISD::SELECT , MVT::i8 , Promote);
setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
setOperationAction(ISD::SELECT , MVT::f80 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
setOperationAction(ISD::SETCC , MVT::f80 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::SELECT , MVT::i64 , Custom);
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
}
if (!Subtarget->is64Bit())
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
}
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
// X86 wants to expand memset / memcpy itself.
setOperationAction(ISD::MEMSET , MVT::Other, Custom);
setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
// We don't have line number support yet.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);
setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
if (Subtarget->is64Bit()) {
// FIXME: Verify
setExceptionPointerRegister(X86::RAX);
setExceptionSelectorRegister(X86::RDX);
} else {
setExceptionPointerRegister(X86::EAX);
setExceptionSelectorRegister(X86::EDX);
}
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::VACOPY , MVT::Other, Custom);
else
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
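  // Use the default expansion for stacksave/stackrestore: copies of the stack
  // pointer register registered above with setStackPointerRegisterToSaveRestore.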
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
if (Subtarget->isTargetCygMing())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
else
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
if (X86ScalarSSE) {
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
addRegisterClass(MVT::f64, X86::FR64RegisterClass);
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS , MVT::f64, Custom);
setOperationAction(ISD::FABS , MVT::f32, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG , MVT::f64, Custom);
setOperationAction(ISD::FNEG , MVT::f32, Custom);
// Use ANDPD and ORPD to simulate FCOPYSIGN.
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
// Expand FP immediates into loads from the stack, except for the special
// cases we handle.
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
addLegalFPImmediate(APFloat(+0.0)); // xorps / xorpd
// Conversions to long double (in X87) go through memory.
setConvertAction(MVT::f32, MVT::f80, Expand);
setConvertAction(MVT::f64, MVT::f80, Expand);
// Conversions from long double (in X87) go through memory.
setConvertAction(MVT::f80, MVT::f32, Expand);
setConvertAction(MVT::f80, MVT::f64, Expand);
} else {
// Set up the FP register classes.
addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
setOperationAction(ISD::UNDEF, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
// Floating truncations need to go through memory.
setConvertAction(MVT::f80, MVT::f32, Expand);
setConvertAction(MVT::f64, MVT::f32, Expand);
setConvertAction(MVT::f80, MVT::f64, Expand);
if (!UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
setOperationAction(ISD::FCOS , MVT::f64 , Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
}
// Long double always uses X87.
addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
setOperationAction(ISD::ADD, MVT::v4i16, Legal);
setOperationAction(ISD::ADD, MVT::v2i32, Legal);
setOperationAction(ISD::ADD, MVT::v1i64, Legal);
setOperationAction(ISD::SUB, MVT::v8i8, Legal);
setOperationAction(ISD::SUB, MVT::v4i16, Legal);
setOperationAction(ISD::SUB, MVT::v2i32, Legal);
setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
setOperationAction(ISD::MUL, MVT::v4i16, Legal);
setOperationAction(ISD::AND, MVT::v8i8, Promote);
AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v4i16, Promote);
AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v2i32, Promote);
AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v1i64, Legal);
setOperationAction(ISD::OR, MVT::v8i8, Promote);
AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v4i16, Promote);
AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v2i32, Promote);
AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v1i64, Legal);
setOperationAction(ISD::XOR, MVT::v8i8, Promote);
AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v4i16, Promote);
AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v2i32, Promote);
AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v1i64, Legal);
setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
}
  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
}
  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
setOperationAction(ISD::ADD, MVT::v16i8, Legal);
setOperationAction(ISD::ADD, MVT::v8i16, Legal);
setOperationAction(ISD::ADD, MVT::v4i32, Legal);
setOperationAction(ISD::ADD, MVT::v2i64, Legal);
setOperationAction(ISD::SUB, MVT::v16i8, Legal);
setOperationAction(ISD::SUB, MVT::v8i16, Legal);
setOperationAction(ISD::SUB, MVT::v4i32, Legal);
setOperationAction(ISD::SUB, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
setOperationAction(ISD::FADD, MVT::v2f64, Legal);
setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
// Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
}
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::SELECT);
computeRegisterProperties();
// FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
allowUnalignedMemoryAccesses = true; // x86 supports it!
}
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
#include "X86GenCallingConv.inc"
/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);
// If this is the first return lowered for this function, add the regs to the
// liveout set for the function.
if (DAG.getMachineFunction().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
if (RVLocs[i].isRegLoc())
DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
}
SDOperand Chain = Op.getOperand(0);
SDOperand Flag;
// Copy the result values into the output registers.
if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
RVLocs[0].getLocReg() != X86::ST0) {
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
Flag);
Flag = Chain.getValue(1);
}
} else {
// We need to handle a destination of ST0 specially, because it isn't really
// a register.
SDOperand Value = Op.getOperand(1);
// If this is an FP return with ScalarSSE, we need to move the value from
// an XMM register onto the fp-stack.
if (X86ScalarSSE) {
SDOperand MemLoc;
// If this is a load into a scalarsse value, don't store the loaded value
// back to the stack, only to reload it: just replace the scalar-sse load.
if (ISD::isNON_EXTLoad(Value.Val) &&
(Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
Chain = Value.getOperand(0);
MemLoc = Value.getOperand(1);
} else {
// Spill the value to memory and reload it into top of stack.
unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
}
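      // Load the value from its memory slot onto the x87 stack with an FLD
      // node; FP_SET_RESULT below then leaves it in ST(0) for the return.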
SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
Chain = Value.getValue(1);
}
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
SDOperand Ops[] = { Chain, Value };
Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
Flag = Chain.getValue(1);
}
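  // The number of bytes the callee pops on return is passed to RET_FLAG as an
  // i16 constant operand.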
SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
if (Flag.Val)
return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
else
return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}
/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. It returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG) {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
SmallVector<SDOperand, 8> ResultVals;
// Copy all of the result registers out of their specified physreg.
if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
for (unsigned i = 0; i != RVLocs.size(); ++i) {
Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0));
}
} else {
// Copies from the FP stack are special, as ST0 isn't a valid register
// before the fp stackifier runs.
// Copy ST0 into an RFP register with FP_GET_RESULT.
SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
SDOperand GROps[] = { Chain, InFlag };
SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
Chain = RetVal.getValue(1);
InFlag = RetVal.getValue(2);
// If we are using ScalarSSE, store ST(0) to the stack and reload it into
// an XMM register.
if (X86ScalarSSE) {
// FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
SDOperand Ops[] = {
Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
};
Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
Chain = RetVal.getValue(1);
}
    ResultVals.push_back(RetVal);
  }

// Merge everything together with a MERGE_VALUES node.
ResultVals.push_back(Chain);
return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).Val;
}
//===----------------------------------------------------------------------===//
// C & StdCall Calling Convention implementation
//===----------------------------------------------------------------------===//
// The StdCall calling convention is the standard convention for many Windows
// API routines. It differs from the C calling convention only slightly: the
// callee cleans up the stack instead of the caller, and symbols are decorated
// (name-mangled) in a particular way. It does not support vector arguments.
/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
const TargetRegisterClass *RC) {
assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
MF.addLiveIn(PReg, VReg);
return VReg;
}
SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
SDOperand Root, unsigned i) {
// Create the nodes corresponding to a load from this parameter slot.
int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
VA.getLocMemOffset());
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
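  // For byval arguments, hand back the frame index itself; the aggregate is
  // already in its stack slot, so no load is needed here.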
if (Flags & ISD::ParamFlags::ByVal)
return FIN;
else
return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
}
SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isStdCall) {
unsigned NumArgs = Op.Val->getNumValues() - 1;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
SmallVector<SDOperand, 8> ArgValues;
unsigned LastVal = ~0U;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
// TODO: If an arg is passed in two places (e.g. reg and stack), skip later
// places.
assert(VA.getValNo() != LastVal &&
"Don't support value assigned to multiple locs yet");
LastVal = VA.getValNo();
if (VA.isRegLoc()) {
MVT::ValueType RegVT = VA.getLocVT();
TargetRegisterClass *RC;
if (RegVT == MVT::i32)
RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the
// right size.
if (VA.getLocInfo() == CCValAssign::SExt)
ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::ZExt)
ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
if (VA.getLocInfo() != CCValAssign::Full)
ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();

  ArgValues.push_back(Root);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
if (isStdCall && !isVarArg) {
BytesToPopOnReturn = StackSize; // Callee pops everything..
BytesCallerReserves = 0;
} else {
BytesToPopOnReturn = 0; // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (NumArgs &&
(cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
ISD::ParamFlags::StructReturn))
BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
SmallVector<SDOperand, 8> MemOpChains;
SDOperand StackPtr;
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: assert(0 && "Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
break;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
break;
}
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
}
}
// If the first argument is an sret pointer, remember it.
bool isSRet = NumOps &&
(cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
ISD::ParamFlags::StructReturn);
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into registers.
SDOperand InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
InFlag);
InFlag = Chain.getValue(1);
}
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
Subtarget->isPICStyleGOT()) {
Chain = DAG.getCopyToReg(Chain, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
InFlag);
InFlag = Chain.getValue(1);
}
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
getTargetMachine(), true))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDOperand, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
// Add argument registers to the end of the list so that they are known live
// into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
// Add an implicit use GOT pointer in EBX.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
Subtarget->isPICStyleGOT())
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
if (InFlag.Val)
Ops.push_back(InFlag);
Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush = 0;
if (CC == CallingConv::X86_StdCall) {
if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
else
NumBytesForCalleeToPush = NumBytes;
} else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
// return.
return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
//===----------------------------------------------------------------------===//
//                   FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// and requires that the callee pop its arguments off the stack (allowing
// proper tail calls). It has the same return-value conventions as the C
// calling convention.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);
SmallVector<SDOperand, 8> ArgValues;
unsigned LastVal = ~0U;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
// TODO: If an arg is passed in two places (e.g. reg and stack), skip later
// places.
assert(VA.getValNo() != LastVal &&
"Don't support value assigned to multiple locs yet");
LastVal = VA.getValNo();
if (VA.isRegLoc()) {
MVT::ValueType RegVT = VA.getLocVT();
TargetRegisterClass *RC;
if (RegVT == MVT::i32)