//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//
#include "CGCall.h"
#include "CodeGenFunction.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h"
#include "ABIInfo.h"
using namespace clang;
using namespace CodeGen;
/***/
// FIXME: Use iterator and sidestep silly type array creation.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
llvm::SmallVector<QualType, 16> ArgTys;
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
llvm::SmallVector<QualType, 16> ArgTys;
// Add the 'this' pointer.
ArgTys.push_back(MD->getThisType(Context));
const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
return getFunctionInfo(FTP->getResultType(), ArgTys);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MD->isInstance())
return getFunctionInfo(MD);
}
const FunctionType *FTy = FD->getType()->getAsFunctionType();
if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(MD->getSelfDecl()->getType());
ArgTys.push_back(Context.getObjCSelType());
// FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CallArgList &Args) {
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const FunctionArgList &Args) {
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
return getFunctionInfo(ResTy, ArgTys);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const llvm::SmallVector<QualType, 16> &ArgTys) {
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
void *InsertPos = 0;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
if (FI)
return *FI;
// Construct the function info.
FI = new CGFunctionInfo(ResTy, ArgTys);
FunctionInfos.InsertNode(FI, InsertPos);
// Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}
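// Note: function infos are uniqued in the FoldingSet above, so repeated
// queries for the same signature return the same CGFunctionInfo object,
// and the ABI classification is computed only once per signature.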
/***/
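// Prints a one-line description of this ABIArgInfo to stderr; e.g. an
// indirect argument with 4 byte alignment prints as:
//   (ABIArgInfo Kind=Indirect Align=4)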
void ABIArgInfo::dump() const {
fprintf(stderr, "(ABIArgInfo Kind=");
switch (TheKind) {
case Direct:
fprintf(stderr, "Direct");
break;
case Ignore:
fprintf(stderr, "Ignore");
break;
case Coerce:
fprintf(stderr, "Coerce Type=");
getCoerceToType()->print(llvm::errs());
break;
case Indirect:
fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
break;
case Expand:
fprintf(stderr, "Expand");
break;
}
fprintf(stderr, ")\n");
}
/***/
static bool isEmptyRecord(ASTContext &Context, QualType T);
/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
if (FD->isUnnamedBitfield())
return true;
QualType FT = FD->getType();
// Constant arrays of empty records count as empty, strip them off.
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
FT = AT->getElementType();
return isEmptyRecord(Context, FT);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T) {
const RecordType *RT = T->getAsRecordType();
if (!RT)
    return false;
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
for (RecordDecl::field_iterator i = RD->field_begin(Context),
e = RD->field_end(Context); i != e; ++i)
if (!isEmptyField(Context, *i))
return false;
return true;
}
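// For example, 'struct E { int : 0; };' is an empty record (its only
// field is an unnamed bit-field), and so is 'struct N { E e[4]; };',
// since arrays of empty records are stripped by isEmptyField.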
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists;
/// otherwise null.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
const RecordType *RT = T->getAsStructureType();
if (!RT)
return 0;
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return 0;
const Type *Found = 0;
for (RecordDecl::field_iterator i = RD->field_begin(Context),
e = RD->field_end(Context); i != e; ++i) {
const FieldDecl *FD = *i;
QualType FT = FD->getType();
// Ignore empty fields.
if (isEmptyField(Context, FD))
continue;
// If we already found an element then this isn't a single-element
// struct.
    if (Found)
      return 0;

// Treat single element arrays as the element.
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
if (AT->getSize().getZExtValue() != 1)
break;
FT = AT->getElementType();
}
    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
if (!Found)
return 0;
}
}
return Found;
}
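// For example, 'struct { int : 0; float f; }' is a single element
// struct whose element type is 'float', and
// 'struct { struct { double d; } x; }' resolves recursively to 'double'.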
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
return false;
uint64_t Size = Context.getTypeSize(Ty);
return Size == 32 || Size == 64;
}
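// For example, 'int', 'long long', and any pointer qualify on typical
// targets, while 'short' (16 bits) and 'long double' (more than 64 bits
// on x86) do not.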
static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
ASTContext &Context) {
for (RecordDecl::field_iterator i = RD->field_begin(Context),
e = RD->field_end(Context); i != e; ++i) {
const FieldDecl *FD = *i;
if (!is32Or64BitBasicType(FD->getType(), Context))
return false;
// FIXME: Reject bit-fields wholesale; there are two problems, we
// don't know how to expand them yet, and the predicate for
// telling if a bitfield still counts as "basic" is more
// complicated than what we were doing previously.
if (FD->isBitField())
      return false;
  }

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type, Context);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
};
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
ASTContext &Context;
bool IsDarwin;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
}
  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

public:
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type, Context);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
  X86_32ABIInfo(ASTContext &Context, bool d)
    : ABIInfo(), Context(Context), IsDarwin(d) {}
};
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
ASTContext &Context) {
uint64_t Size = Context.getTypeSize(Ty);
// Type must be register sized.
if (!isRegisterSize(Size))
return false;
if (Ty->isVectorType()) {
// 64- and 128- bit vectors inside structures are not returned in
// registers.
if (Size == 64 || Size == 128)
return false;
return true;
}
// If this is a builtin, pointer, or complex type, it is ok.
if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
return true;
// Arrays are treated like records.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
return shouldReturnTypeInRegister(AT->getElementType(), Context);
// Otherwise, it must be a record type.
const RecordType *RT = Ty->getAsRecordType();
if (!RT) return false;
// Structure types are passed in register if all fields would be
// passed in a register.
for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
e = RT->getDecl()->field_end(Context); i != e; ++i) {
const FieldDecl *FD = *i;
// Empty fields are ignored.
if (isEmptyField(Context, FD))
continue;
// Check fields recursively.
if (!shouldReturnTypeInRegister(FD->getType(), Context))
return false;
}
return true;
}
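// For example, 'struct { int x; }' (32 bits) is returned in a register
// on Darwin, while 'struct { char c[3]; }' (24 bits) is not: its size
// is not a register size.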
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else if (const VectorType *VT = RetTy->getAsVectorType()) {
// On Darwin, some vectors are returned in registers.
if (IsDarwin) {
uint64_t Size = Context.getTypeSize(RetTy);
// 128-bit vectors are a special case; they are returned in
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
return ABIArgInfo::getIndirect(0);
}
return ABIArgInfo::getDirect();
} else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
// Structures with flexible arrays are always indirect.
if (const RecordType *RT = RetTy->getAsStructureType())
if (RT->getDecl()->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0);
// Outside of Darwin, structs and unions are always indirect.
if (!IsDarwin && !RetTy->isAnyComplexType())
return ABIArgInfo::getIndirect(0);
// Classify "single element" structs as their element type.
if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
if (BT->isIntegerType()) {
// We need to use the size of the structure, padding
// bit-fields can adjust that to be larger than the single
// element type.
uint64_t Size = Context.getTypeSize(RetTy);
return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
} else if (BT->getKind() == BuiltinType::Float) {
assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
} else if (BT->getKind() == BuiltinType::Double) {
assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
}
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as
// the proper pointer type.
        const llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
return ABIArgInfo::getCoerce(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
// register when inside a structure.
uint64_t Size = Context.getTypeSize(RetTy);
if (Size == 64 || Size == 128)
return ABIArgInfo::getIndirect(0);
        return classifyReturnType(QualType(SeltTy, 0), Context);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
if (isRegisterSize(Size)) {
// Always return in register for unions for now.
// FIXME: This is wrong, but better than treating as a
// structure.
if (RetTy->isUnionType())
return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
// Small structures which are register sized are generally returned
// in a register.
if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context))
return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
}
return ABIArgInfo::getIndirect(0);
} else {
return ABIArgInfo::getDirect();
}
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context) const {
// FIXME: Set alignment on indirect arguments.
if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
// Structures with flexible arrays are always indirect.
if (const RecordType *RT = Ty->getAsStructureType())
if (RT->getDecl()->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0);
// Ignore empty structs.
uint64_t Size = Context.getTypeSize(Ty);
if (Ty->isStructureType() && Size == 0)
return ABIArgInfo::getIgnore();
// Expand structs with size <= 128-bits which consist only of
// basic types (int, long long, float, double, xxx*). This is
// non-recursive and does not ignore empty fields.
if (const RecordType *RT = Ty->getAsStructureType()) {
if (Context.getTypeSize(Ty) <= 4*32 &&
areAllFields32Or64BitBasicType(RT->getDecl(), Context))
return ABIArgInfo::getExpand();
}
return ABIArgInfo::getIndirect(0);
} else {
return ABIArgInfo::getDirect();
}
}
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  const llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr,
llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
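// As a sketch, for 'va_arg(ap, int)' on x86-32 the code above emits
// roughly the following IR (assuming a 32-bit int):
//   %ap.cur  = load i8** %ap
//   %0       = bitcast i8* %ap.cur to i32*       ; result address
//   %ap.next = getelementptr i8* %ap.cur, i32 4  ; advance by 4 bytes
//   store i8* %ap.next, i8** %ap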
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
enum Class {
Integer = 0,
SSE,
SSEUp,
X87,
X87Up,
ComplexX87,
NoClass,
Memory
};
/// merge - Implement the X86_64 ABI merging algorithm.
///
/// Merge an accumulating classification \arg Accum with a field
/// classification \arg Field.
///
/// \param Accum - The accumulating classification. This should
/// always be either NoClass or the result of a previous merge
/// call. In addition, this should never be Memory (the caller
/// should just return Memory for the aggregate).
Class merge(Class Accum, Class Field) const;
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
///
/// \param Lo - The classification for the parts of the type
/// residing in the low word of the containing object.
///
/// \param Hi - The classification for the parts of the type
/// residing in the high word of the containing object.
///
/// \param OffsetBase - The bit offset of this type in the
/// containing object. Some parameters are classified different
/// depending on whether they straddle an eightbyte boundary.
///
/// If a word is unused its result will be NoClass; if a type should
/// be passed in Memory then at least the classification of \arg Lo
/// will be Memory.
///
/// The \arg Lo class will be NoClass iff the argument is ignored.
///
/// If the \arg Lo class is ComplexX87, then the \arg Hi class will
/// also be ComplexX87.
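  ///
  /// For example, at offset 0: 'int' classifies as (Integer, NoClass),
  /// '__int128' as (Integer, Integer), 'long double' as (X87, X87Up),
  /// and '_Complex double' as (SSE, SSE).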
void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
Class &Lo, Class &Hi) const;
/// getCoerceResult - Given a source type \arg Ty and an LLVM type
/// to coerce to, chose the best way to pass Ty in the same place
/// that \arg CoerceTo would be passed, but while keeping the
/// emitted code as simple as possible.
///
/// FIXME: Note, this should be cleaned up to just take an
/// enumeration of all the ways we might want to pass things,
/// instead of constructing an LLVM type. This makes this code more
/// explicit, and it makes it clearer that we are also doing this
/// for correctness in the case of passing scalar types.
ABIArgInfo getCoerceResult(QualType Ty,
const llvm::Type *CoerceTo,
ASTContext &Context) const;
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ABIArgInfo classifyArgumentType(QualType Ty,
ASTContext &Context,
unsigned &neededInt,
unsigned &neededSSE) const;
public:
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}  // end anonymous namespace

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
Class Field) const {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
// considered. The resulting class is calculated according to
// the classes of the fields in the eightbyte:
//
// (a) If both classes are equal, this is the resulting class.
//
// (b) If one of the classes is NO_CLASS, the resulting class is
// the other class.
//
// (c) If one of the classes is MEMORY, the result is the MEMORY
// class.
//
// (d) If one of the classes is INTEGER, the result is the
// INTEGER.
//
// (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class.
//
// (f) Otherwise class SSE is used.
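  //
  // For example, merging the two fields of 'struct { int a; float b; }'
  // (one eightbyte) proceeds NoClass + Integer -> Integer (rule b),
  // then Integer + SSE -> Integer (rule d).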
// Accum should never be memory (we should have returned) or
// ComplexX87 (because this cannot be passed in a structure).
assert((Accum != Memory && Accum != ComplexX87) &&
"Invalid accumulated classification during merge.");
if (Accum == Field || Field == NoClass)
return Accum;
else if (Field == Memory)
return Memory;
else if (Accum == NoClass)
return Field;
else if (Accum == Integer || Field == Integer)
return Integer;
else if (Field == X87 || Field == X87Up || Field == ComplexX87)
return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
// FIXME: This code can be simplified by introducing a simple value
// class for Class pairs with appropriate constructor methods for
// the various situations.
// FIXME: Some of the split computations are wrong; unaligned
// vectors shouldn't be passed in registers for example, so there is
// no chance they can straddle an eightbyte. Verify & simplify.
Lo = Hi = NoClass;
Class &Current = OffsetBase < 64 ? Lo : Hi;
Current = Memory;
if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
BuiltinType::Kind k = BT->getKind();
if (k == BuiltinType::Void) {
Current = NoClass;
} else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
Lo = Integer;
Hi = Integer;
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
Current = Integer;
} else if (k == BuiltinType::Float || k == BuiltinType::Double) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
Lo = X87;
Hi = X87Up;
}
// FIXME: _Decimal32 and _Decimal64 are SSE.
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
} else if (const EnumType *ET = Ty->getAsEnumType()) {
// Classify the underlying integer type.
classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
} else if (Ty->hasPointerRepresentation()) {
Current = Integer;
} else if (const VectorType *VT = Ty->getAsVectorType()) {
uint64_t Size = Context.getTypeSize(VT);
if (Size == 32) {
// gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
// float> as integer.
Current = Integer;
// If this type crosses an eightbyte boundary, it should be
// split.
uint64_t EB_Real = (OffsetBase) / 64;
uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
if (EB_Real != EB_Imag)
Hi = Lo;
} else if (Size == 64) {
// gcc passes <1 x double> in memory. :(
if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
return;
// gcc passes <1 x long long> as INTEGER.
if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
Current = Integer;
else
Current = SSE;
      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
} else if (Size == 128) {
Lo = SSE;
Hi = SSEUp;
}
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
Current = SSE;
else if (ET == Context.DoubleTy)
Lo = Hi = SSE;
else if (ET == Context.LongDoubleTy)
Current = ComplexX87;
// If this complex type crosses an eightbyte boundary then it
// should be split.
uint64_t EB_Real = (OffsetBase) / 64;
uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)
Hi = Lo;
} else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
uint64_t Size = Context.getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than two eightbytes, ..., it has class MEMORY.
if (Size > 128)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
// fields, it has class MEMORY.
//
// Only need to check alignment of array base.
if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
return;
// Otherwise implement simplified merge. We could be smarter about
// this, but it isn't worth it and would be harder to verify.
Current = NoClass;
uint64_t EltSize = Context.getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
if (Hi == Memory)
Lo = Memory;
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
} else if (const RecordType *RT = Ty->getAsRecordType()) {
uint64_t Size = Context.getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than two eightbytes, ..., it has class MEMORY.
if (Size > 128)
return;
const RecordDecl *RD = RT->getDecl();
// Assume variable sized types are passed in memory.
if (RD->hasFlexibleArrayMember())
return;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// Reset Lo class, this will be recomputed.
Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(Context),
e = RD->field_end(Context); i != e; ++i, ++idx) {
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
// fields, it has class MEMORY.
// Note, skip this test for bit-fields, see below.
if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
Lo = Memory;
return;
}
// Classify this field.
//
// AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
// exceeds a single eightbyte, each is classified
// separately. Each eightbyte gets initialized to class
// NO_CLASS.
Class FieldLo, FieldHi;
// Bit-fields require special handling, they do not force the
// structure to be passed in memory even if unaligned, and
// therefore they can straddle an eightbyte.
if (BitField) {
// Ignore padding bit-fields.
if (i->isUnnamedBitfield())
continue;
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
uint64_t EB_Lo = Offset / 64;
uint64_t EB_Hi = (Offset + Size - 1) / 64;
FieldLo = FieldHi = NoClass;
if (EB_Lo) {
assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
FieldLo = NoClass;
FieldHi = Integer;
} else {
FieldLo = Integer;
FieldHi = EB_Hi ? Integer : NoClass;
}
} else
classify(i->getType(), Context, Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
break;
}
// AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
//
// (a) If one of the classes is MEMORY, the whole argument is
// passed in memory.
//
  // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
// The first of these conditions is guaranteed by how we implement
// the merge (just bail).
//
// The second condition occurs in the case of unions; for example
// union { _Complex double; unsigned; }.
if (Hi == Memory)
Lo = Memory;
if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
const llvm::Type *CoerceTo,
ASTContext &Context) const {
if (CoerceTo == llvm::Type::Int64Ty) {
// Integer and pointer types will end up in a general purpose
// register.
if (Ty->isIntegralType() || Ty->isPointerType())
return ABIArgInfo::getDirect();
} else if (CoerceTo == llvm::Type::DoubleTy) {
// FIXME: It would probably be better to make CGFunctionInfo only
// map using canonical types than to canonize here.
QualType CTy = Context.getCanonicalType(Ty);
// Float and double end up in a single SSE reg.
if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
return ABIArgInfo::getDirect();
}
return ABIArgInfo::getCoerce(CoerceTo);
}
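// For example, coercing 'long' to i64 yields Direct (the value is
// already an integer headed for a GPR), while coercing
// 'struct { int a, b; }' to i64 yields Coerce(i64).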
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
classify(RetTy, Context, 0, Lo, Hi);
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
return ABIArgInfo::getIgnore();
case SSEUp:
case X87Up:
assert(0 && "Invalid classification for lo word.");
  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return ABIArgInfo::getIndirect(0);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
ResType = llvm::Type::Int64Ty; break;
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
ResType = llvm::Type::DoubleTy; break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
// returned on the X87 stack in %st0 as 80-bit x87 number.
case X87:
ResType = llvm::Type::X86_FP80Ty; break;
// AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
// part of the value is returned in %st0 and the imaginary part in
// %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
llvm::Type::X86_FP80Ty,
NULL);
break;
}
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
case Memory:
case X87:
assert(0 && "Invalid classification for hi word.");
  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
// is passed in the upper half of the last used SSE register.
//
  // SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
break;
  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
unsigned &neededInt,
unsigned &neededSSE) const {
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, Context, 0, Lo, Hi);
// Check some invariants.
// FIXME: Enforce these by construction.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
neededInt = 0;
neededSSE = 0;
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
return ABIArgInfo::getIgnore();
// AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
// on the stack.
case Memory:
// AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or