"llvm/git@repo.hca.bsc.es:lalbano/llvm-bpevl.git" did not exist on "df69c69427dea7f5b3b3a4d4564bc77b0926ec88"
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
// FIXME: temporary.
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
cl::desc("Enable fastcc on X86"));
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSE = Subtarget->hasSSE2();
// Set up the TargetLowering object.
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setShiftAmountType(MVT::i8);
setSetCCResultType(MVT::i8);
setSetCCResultContents(ZeroOrOneSetCCResult);
setSchedulingPreference(SchedulingForRegPressure);
setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
setStackPointerRegisterToSaveRestore(X86::ESP);
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmpLongJmp(true);
// Add legal addressing mode scale values.
addLegalAddressScale(8);
addLegalAddressScale(4);
addLegalAddressScale(2);
// Enter the ones which require both scale + index last. These are more
// expensive.
addLegalAddressScale(9);
addLegalAddressScale(5);
addLegalAddressScale(3);
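// A scale of 9, for example, is matched as base + index*8 with base == index
// (a single LEA), which is why these scales need both the scale and index
// fields.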
// Set up the register classes.
addRegisterClass(MVT::i8, X86::R8RegisterClass);
addRegisterClass(MVT::i16, X86::R16RegisterClass);
addRegisterClass(MVT::i32, X86::R32RegisterClass);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
if (X86ScalarSSE)
// No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
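// The promotions above are exact: an i1/i8/i16 value zero-extended to i32 is
// always non-negative, so the signed i32 convert produces the same result.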
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
// SSE has no i16 to fp conversion, only i32
if (X86ScalarSSE)
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
// We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
// isn't legal.
setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
if (X86ScalarSSE) {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
}
// Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion.
setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
if (X86ScalarSSE && !Subtarget->hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
else
// With SSE3 we can use fisttpll to convert to a signed i64.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
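// Promoting FP_TO_UINT is likewise exact: every u32 value fits in the
// non-negative range of i64, and the signed i64 result is truncated back.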
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
setOperationAction(ISD::SELECT , MVT::i8 , Promote);
setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
// 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
// X86 wants to expand memset / memcpy itself.
setOperationAction(ISD::MEMSET , MVT::Other, Custom);
setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
// We don't have line number support yet.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// Use the default implementation.
setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
if (X86ScalarSSE) {
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
addRegisterClass(MVT::f64, X86::FR64RegisterClass);
// SSE has no load+extend ops
setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS , MVT::f64, Custom);
setOperationAction(ISD::FABS , MVT::f32, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG , MVT::f64, Custom);
setOperationAction(ISD::FNEG , MVT::f32, Custom);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
// Expand FP immediates into loads from the stack, except for the special
// cases we handle.
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
addLegalFPImmediate(+0.0); // xorps / xorpd
} else {
// Set up the FP register classes.
addRegisterClass(MVT::f64, X86::RFPRegisterClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
if (!UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
setOperationAction(ISD::FCOS , MVT::f64 , Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
addLegalFPImmediate(+0.0); // FLD0
addLegalFPImmediate(+1.0); // FLD1
addLegalFPImmediate(-0.0); // FLD0/FCHS
addLegalFPImmediate(-1.0); // FLD1/FCHS
}
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned VT = (unsigned)MVT::Vector + 1;
VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
}
if (Subtarget->hasMMX()) {
// Set up the MMX (VR64) register classes.
addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
}
if (Subtarget->hasSSE1()) {
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
setOperationAction(ISD::ADD, MVT::v4f32, Legal);
setOperationAction(ISD::SUB, MVT::v4f32, Legal);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
}
if (Subtarget->hasSSE2()) {
addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
setOperationAction(ISD::ADD, MVT::v2f64, Legal);
setOperationAction(ISD::ADD, MVT::v16i8, Legal);
setOperationAction(ISD::ADD, MVT::v8i16, Legal);
setOperationAction(ISD::ADD, MVT::v4i32, Legal);
setOperationAction(ISD::SUB, MVT::v2f64, Legal);
setOperationAction(ISD::SUB, MVT::v16i8, Legal);
setOperationAction(ISD::SUB, MVT::v8i16, Legal);
setOperationAction(ISD::SUB, MVT::v4i32, Legal);
setOperationAction(ISD::MUL, MVT::v2f64, Legal);
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
setOperationAction(ISD::LOAD, MVT::v16i8, Legal);
setOperationAction(ISD::LOAD, MVT::v8i16, Legal);
setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
computeRegisterProperties();
// FIXME: These should be based on subtarget info. Plus, the values should
// be smaller when we are in optimizing for size mode.
maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
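// e.g. with a limit of 16, a 16-byte llvm.memset becomes four i32 stores,
// while anything needing more than 16 stores falls back to a function call.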
allowUnalignedMemoryAccesses = true; // x86 supports it!
}
std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
return LowerFastCCArguments(F, DAG);
return LowerCCCArguments(F, DAG);
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
bool isVarArg, unsigned CallingConv,
bool isTailCall,
SDOperand Callee, ArgListTy &Args,
SelectionDAG &DAG) {
assert((!isVarArg || CallingConv == CallingConv::C) &&
"Only C takes varargs!");
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
if (CallingConv == CallingConv::Fast && EnableFastCC)
return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}
//===----------------------------------------------------------------------===//
// C Calling Convention implementation
//===----------------------------------------------------------------------===//
std::vector<SDOperand>
X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
std::vector<SDOperand> ArgValues;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
// Add DAG nodes to load the arguments... On entry to a function on the X86,
// the stack frame looks like this:
//
// [ESP] -- return address
// [ESP + 4] -- first argument (leftmost lexically)
// [ESP + 8] -- second argument, if first argument is four bytes in size
// ...
//
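// For example, for f(int a, double d), 'a' lives at [ESP + 4] and 'd' at
// [ESP + 8]; the loop below allocates a fixed frame object at each offset.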
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
MVT::ValueType ObjectVT = getValueType(I->getType());
unsigned ArgIncrement = 4;
unsigned ObjSize;
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
case MVT::i1:
case MVT::i8: ObjSize = 1; break;
case MVT::i16: ObjSize = 2; break;
case MVT::i32: ObjSize = 4; break;
case MVT::i64: ObjSize = ArgIncrement = 8; break;
case MVT::f32: ObjSize = 4; break;
case MVT::f64: ObjSize = ArgIncrement = 8; break;
}
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
// Create the SelectionDAG nodes corresponding to a load from this parameter
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
// Don't codegen dead arguments. FIXME: remove this check when we can nuke
// dead loads.
SDOperand ArgValue;
if (!I->use_empty())
ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
DAG.getSrcValue(NULL));
else {
if (MVT::isInteger(ObjectVT))
ArgValue = DAG.getConstant(0, ObjectVT);
else
ArgValue = DAG.getConstantFP(0, ObjectVT);
}
ArgValues.push_back(ArgValue);
ArgOffset += ArgIncrement; // Move on to the next argument...
}
// If the function takes a variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (F.isVarArg())
VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
ReturnAddrIndex = 0; // No return address slot generated yet.
BytesToPopOnReturn = 0; // Callee pops nothing.
BytesCallerReserves = ArgOffset;
// Finally, inform the code generator which regs we return values in.
switch (getValueType(F.getReturnType())) {
default: assert(0 && "Unknown type!");
case MVT::isVoid: break;
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
MF.addLiveOut(X86::EAX);
break;
case MVT::i64:
MF.addLiveOut(X86::EAX);
MF.addLiveOut(X86::EDX);
break;
case MVT::f32:
case MVT::f64:
MF.addLiveOut(X86::ST0);
break;
}
return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
bool isVarArg, bool isTailCall,
SDOperand Callee, ArgListTy &Args,
SelectionDAG &DAG) {
// Count how many bytes are to be pushed on the stack.
unsigned NumBytes = 0;
if (Args.empty()) {
// Save zero bytes.
Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, getPointerTy()));
} else {
for (unsigned i = 0, e = Args.size(); i != e; ++i)
switch (getValueType(Args[i].second)) {
default: assert(0 && "Unknown value type!");
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::f32:
NumBytes += 4;
break;
case MVT::i64:
case MVT::f64:
NumBytes += 8;
break;
}
Chain = DAG.getCALLSEQ_START(Chain,
DAG.getConstant(NumBytes, getPointerTy()));
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
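// (The arguments are not actually pushed; they are stored at fixed offsets
// from ESP, which produces the same right-to-left layout.)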
std::vector<SDOperand> Stores;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
switch (getValueType(Args[i].second)) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i8:
case MVT::i16:
// Promote the integer to 32 bits. If the input type is signed use a
// sign extend, otherwise use a zero extend.
if (Args[i].second->isSigned())
Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
else
Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
// FALL THROUGH
case MVT::i32:
case MVT::f32:
Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Args[i].first, PtrOff,
DAG.getSrcValue(NULL)));
ArgOffset += 4;
break;
case MVT::i64:
case MVT::f64:
Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Args[i].first, PtrOff,
DAG.getSrcValue(NULL)));
ArgOffset += 8;
break;
}
}
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
}
std::vector<MVT::ValueType> RetVals;
MVT::ValueType RetTyVT = getValueType(RetTy);
RetVals.push_back(MVT::Other);
// The result values produced have to be legal. Promote the result.
switch (RetTyVT) {
case MVT::isVoid: break;
default:
RetVals.push_back(RetTyVT);
break;
case MVT::i1:
case MVT::i8:
case MVT::i16:
RetVals.push_back(MVT::i32);
break;
case MVT::f32:
if (X86ScalarSSE)
RetVals.push_back(MVT::f32);
else
RetVals.push_back(MVT::f64);
break;
case MVT::i64:
RetVals.push_back(MVT::i32);
RetVals.push_back(MVT::i32);
break;
}
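// e.g. an i8 return is described as i32 here, and an x87 f32 return as f64
// (ST0 carries extra precision anyway); the extra bits are trimmed off after
// the call.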
std::vector<MVT::ValueType> NodeTys;
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
// FIXME: Do not generate X86ISD::TAILCALL for now.
Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
SDOperand InFlag = Chain.getValue(1);
NodeTys.clear();
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
Ops.push_back(DAG.getConstant(0, getPointerTy()));
Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
InFlag = Chain.getValue(1);
SDOperand RetVal;
if (RetTyVT != MVT::isVoid) {
switch (RetTyVT) {
default: assert(0 && "Unknown value type to return!");
case MVT::i1:
case MVT::i8:
RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
Chain = RetVal.getValue(1);
if (RetTyVT == MVT::i1)
RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
break;
case MVT::i16:
RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
Chain = RetVal.getValue(1);
break;
case MVT::i32:
RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
Chain = RetVal.getValue(1);
break;
case MVT::i64: {
SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
Lo.getValue(2));
RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
Chain = Hi.getValue(1);
break;
}
case MVT::f32:
case MVT::f64: {
std::vector<MVT::ValueType> Tys;
Tys.push_back(MVT::f64);
Tys.push_back(MVT::Other);
Tys.push_back(MVT::Flag);
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(InFlag);
RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
Chain = RetVal.getValue(1);
InFlag = RetVal.getValue(2);
if (X86ScalarSSE) {
// FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Tys.clear();
Tys.push_back(MVT::Other);
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(RetVal);
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(RetTyVT));
Ops.push_back(InFlag);
Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
DAG.getSrcValue(NULL));
Chain = RetVal.getValue(1);
}
if (RetTyVT == MVT::f32 && !X86ScalarSSE)
// FIXME: we would really like to remember that this FP_ROUND
// operation is okay to eliminate if we allow excess FP precision.
RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
break;
}
}
}
return std::make_pair(RetVal, Chain);
}
//===----------------------------------------------------------------------===//
// Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as the C calling
// convention.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
//
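// For example, assuming the in-register limit below were 2, a call to
//   fastcc int f(int a, int b, int c)
// would pass 'a' in EAX, 'b' in EDX, and 'c' on the stack; f itself would pop
// the stack bytes (padded to 8n+4) on return.
//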
/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
TargetRegisterClass *RC) {
assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
MF.addLiveIn(PReg, VReg);
return VReg;
}
// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
// EDX". Anything more is illegal.
//
// FIXME: The linscan register allocator currently has problems with
// coalescing. At the time of this writing, whenever it decides to coalesce
// a physreg with a virtreg, this increases the size of the physreg's live
// range, and the live range cannot ever be reduced. This causes problems if
// too many physregs are coalesced with virtregs, which can cause the register
// allocator to wedge itself.
//
// This code triggers this problem more often if we pass args in registers,
// so disable it until this is fixed.
//
// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
// about code being dead.
//
static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
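// While this is 0, every fastcc argument goes on the stack; presumably it
// will be raised back to 2 (EAX and EDX) once the coalescing issue is fixed.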
std::vector<SDOperand>
X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
std::vector<SDOperand> ArgValues;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
// Add DAG nodes to load the arguments... On entry to a function the stack
// frame looks like this:
//
// [ESP] -- return address
// [ESP + 4] -- first nonreg argument (leftmost lexically)
// [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
// ...
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
// Keep track of the number of integer regs passed so far. This can be either
// 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
// used).
unsigned NumIntRegs = 0;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
MVT::ValueType ObjectVT = getValueType(I->getType());
unsigned ArgIncrement = 4;
unsigned ObjSize = 0;
SDOperand ArgValue;
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
case MVT::i1:
case MVT::i8:
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
X86::R8RegisterClass);
ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
DAG.setRoot(ArgValue.getValue(1));
if (ObjectVT == MVT::i1)
// FIXME: Should insert an assertzext here.
ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
}
++NumIntRegs;
break;
}
ObjSize = 1;
break;
case MVT::i16:
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
X86::R16RegisterClass);
ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
DAG.setRoot(ArgValue.getValue(1));
}
++NumIntRegs;
break;
}
ObjSize = 2;
break;
case MVT::i32:
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
X86::R32RegisterClass);
ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
DAG.setRoot(ArgValue.getValue(1));
}
++NumIntRegs;
break;
}
ObjSize = 4;
break;
case MVT::i64:
if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
if (!I->use_empty()) {
unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
DAG.setRoot(Hi.getValue(1));
ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
}
NumIntRegs += 2;
break;
} else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
if (!I->use_empty()) {
unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
DAG.setRoot(Low.getValue(1));
// Load the high part from memory.
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(4, ArgOffset);
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
DAG.getSrcValue(NULL));
ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
}
ArgOffset += 4;
NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
break;
}
ObjSize = ArgIncrement = 8;
break;
case MVT::f32: ObjSize = 4; break;
case MVT::f64: ObjSize = ArgIncrement = 8; break;
}
// Don't codegen dead arguments. FIXME: remove this check when we can nuke
// dead loads.
if (ObjSize && !I->use_empty()) {
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
// Create the SelectionDAG nodes corresponding to a load from this
// parameter.
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
DAG.getSrcValue(NULL));
} else if (ArgValue.Val == 0) {
if (MVT::isInteger(ObjectVT))
ArgValue = DAG.getConstant(0, ObjectVT);
else
ArgValue = DAG.getConstantFP(0, ObjectVT);
}
ArgValues.push_back(ArgValue);
if (ObjSize)
ArgOffset += ArgIncrement; // Move on to the next argument.
}
// Make sure the incoming argument area takes 8n+4 bytes so that the stack
// stays aligned once the return address has been pushed on top of it.
if ((ArgOffset & 7) == 0)
ArgOffset += 4;
VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
ReturnAddrIndex = 0; // No return address slot generated yet.
BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
BytesCallerReserves = 0;
// Finally, inform the code generator which regs we return values in.
switch (getValueType(F.getReturnType())) {
default: assert(0 && "Unknown type!");
case MVT::isVoid: break;
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
MF.addLiveOut(X86::EAX);
break;
case MVT::i64:
MF.addLiveOut(X86::EAX);
MF.addLiveOut(X86::EDX);
break;
case MVT::f32:
case MVT::f64:
MF.addLiveOut(X86::ST0);
break;
}
return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
bool isTailCall, SDOperand Callee,
ArgListTy &Args, SelectionDAG &DAG) {
// Count how many bytes are to be pushed on the stack.
unsigned NumBytes = 0;
// Keep track of the number of integer regs passed so far. This can be either
// 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
// used).
unsigned NumIntRegs = 0;
for (unsigned i = 0, e = Args.size(); i != e; ++i)
switch (getValueType(Args[i].second)) {
default: assert(0 && "Unknown value type!");
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
++NumIntRegs;
break;
}
// fall through
case MVT::f32:
NumBytes += 4;
break;
case MVT::i64:
if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
NumIntRegs += 2;
break;
} else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
NumBytes += 4;
break;
}
// fall through
case MVT::f64:
NumBytes += 8;
break;
}
// Make sure the outgoing argument area takes 8n+4 bytes so that the stack
// stays aligned once the return address has been pushed on top of it.
if ((NumBytes & 7) == 0)
NumBytes += 4;
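// e.g. 16 bytes of arguments is padded to 20 (= 8*2+4), while 12 bytes is
// already of the form 8n+4; with the 4-byte return address on top, ESP stays
// 8-byte aligned inside the callee.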
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
NumIntRegs = 0;
std::vector<SDOperand> Stores;
std::vector<SDOperand> RegValuesToPass;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
switch (getValueType(Args[i].second)) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i1:
Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
// Fall through.
case MVT::i8:
case MVT::i16:
case MVT::i32:
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
RegValuesToPass.push_back(Args[i].first);
++NumIntRegs;
break;
}
// Fall through
case MVT::f32: {
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Args[i].first, PtrOff,
DAG.getSrcValue(NULL)));
ArgOffset += 4;
break;
}
case MVT::i64:
// Can pass (at least) part of it in regs?
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
Args[i].first, DAG.getConstant(1, MVT::i32));
SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
Args[i].first, DAG.getConstant(0, MVT::i32));
RegValuesToPass.push_back(Lo);
++NumIntRegs;
// Pass both parts in regs?
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
RegValuesToPass.push_back(Hi);
++NumIntRegs;
} else {
// Pass the high part in memory.
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Hi, PtrOff, DAG.getSrcValue(NULL)));
ArgOffset += 4;
}
break;
}
// Fall through
case MVT::f64:
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Args[i].first, PtrOff,
DAG.getSrcValue(NULL)));
ArgOffset += 8;
break;
}
}
if (!Stores.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
// Keep the argument area at 8n+4 bytes so that the stack stays aligned once
// the return address has been pushed on top of it.
if ((ArgOffset & 7) == 0)
ArgOffset += 4;
std::vector<MVT::ValueType> RetVals;
MVT::ValueType RetTyVT = getValueType(RetTy);
RetVals.push_back(MVT::Other);
// The result values produced have to be legal. Promote the result.
switch (RetTyVT) {
case MVT::isVoid: break;
default:
RetVals.push_back(RetTyVT);
break;
case MVT::i1:
case MVT::i8:
case MVT::i16:
RetVals.push_back(MVT::i32);