//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
using namespace clang;
using namespace CodeGen;
using llvm::Value;
//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//
struct BinOpInfo {
Value *LHS;
Value *RHS;
QualType Ty; // Computation Type.
const BinaryOperator *E;
};
namespace {
class VISIBILITY_HIDDEN ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
public:
ScalarExprEmitter(CodeGenFunction &cgf) : CGF(cgf),
Builder(CGF.Builder) {
}
//===--------------------------------------------------------------------===//
// Utilities
//===--------------------------------------------------------------------===//
const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
Value *EmitLoadOfLValue(LValue LV, QualType T) {
return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
}
/// EmitLoadOfLValue - Given an expression with a scalar type that is an
/// l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
// FIXME: Volatile
return EmitLoadOfLValue(EmitLValue(E), E->getType());
}
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType SrcTy);
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination
/// type is an LLVM scalar type.
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
QualType SrcTy, QualType DstTy);
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
Value *VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
assert(0 && "Stmt can't have complex result type!");
return 0;
}
Value *VisitExpr(Expr *S);
Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
return llvm::ConstantInt::get(E->getValue());
}
Value *VisitFloatingLiteral(const FloatingLiteral *E) {
return llvm::ConstantFP::get(E->getValue());
}
Value *VisitCharacterLiteral(const CharacterLiteral *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()),
CGF.getContext().typesAreCompatible(
E->getArgType1(), E->getArgType2()));
}
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
llvm::Value *V =
llvm::ConstantInt::get(llvm::Type::Int32Ty,
CGF.GetIDForAddrOfLabel(E->getLabel()));
return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
}
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
return llvm::ConstantInt::get(EC->getInitVal());
return EmitLoadOfLValue(E);
}
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
return CGF.EmitObjCSelectorExpr(E);
}
Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
return CGF.EmitObjCProtocolExpr(E);
}
Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
return CGF.EmitObjCMessageExpr(E).getScalarVal();
}
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); }
Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
Value *VisitInitListExpr(InitListExpr *E) {
unsigned NumInitElements = E->getNumInits();
if (E->hadArrayRangeDesignator()) {
CGF.ErrorUnsupported(E, "GNU array range designator extension");
}
const llvm::VectorType *VType =
dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
// We have a scalar in braces. Just use the first element.
if (!VType)
return Visit(E->getInit(0));
unsigned NumVectorElements = VType->getNumElements();
const llvm::Type *ElementType = VType->getElementType();
// Emit individual vector element stores.
llvm::Value *V = llvm::UndefValue::get(VType);
// Emit initializers
unsigned i;
for (i = 0; i < NumInitElements; ++i) {
Value *NewV = Visit(E->getInit(i));
Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
// Emit remaining default initializers
for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
llvm::Value *NewV = llvm::Constant::getNullValue(ElementType);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
return V;
}
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
Value *VisitCastExpr(const CastExpr *E) {
return EmitCastExpr(E->getSubExpr(), E->getType());
}
Value *EmitCastExpr(const Expr *E, QualType T);
Value *VisitCallExpr(const CallExpr *E) {
return CGF.EmitCallExpr(E).getScalarVal();
}
Value *VisitStmtExpr(const StmtExpr *E);
// Unary Operators.
Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
Value *VisitUnaryPostDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, false, false);
}
Value *VisitUnaryPostInc(const UnaryOperator *E) {
return VisitPrePostIncDec(E, true, false);
}
Value *VisitUnaryPreDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, false, true);
}
Value *VisitUnaryPreInc(const UnaryOperator *E) {
return VisitPrePostIncDec(E, true, true);
}
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
return EmitLValue(E->getSubExpr()).getAddress();
}
Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitUnaryPlus(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
Value *VisitUnaryMinus (const UnaryOperator *E);
Value *VisitUnaryNot (const UnaryOperator *E);
Value *VisitUnaryLNot (const UnaryOperator *E);
Value *VisitUnaryReal (const UnaryOperator *E);
Value *VisitUnaryImag (const UnaryOperator *E);
Value *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
Value *VisitUnaryOffsetOf(const UnaryOperator *E);
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
Value *EmitDiv(const BinOpInfo &Ops);
Value *EmitRem(const BinOpInfo &Ops);
Value *EmitAdd(const BinOpInfo &Ops);
Value *EmitSub(const BinOpInfo &Ops);
Value *EmitShl(const BinOpInfo &Ops);
Value *EmitShr(const BinOpInfo &Ops);
Value *EmitAnd(const BinOpInfo &Ops) {
return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
}
Value *EmitXor(const BinOpInfo &Ops) {
return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
}
Value *EmitOr (const BinOpInfo &Ops) {
return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
}
BinOpInfo EmitBinOps(const BinaryOperator *E);
Value *EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
// Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
Value *VisitBin ## OP(const BinaryOperator *E) { \
return Emit ## OP(EmitBinOps(E)); \
} \
Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
}
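// For example, HANDLEBINOP(Add) expands to:
//   Value *VisitBinAdd(const BinaryOperator *E) {
//     return EmitAdd(EmitBinOps(E));
//   }
//   Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
//     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
//   }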
HANDLEBINOP(Mul);
HANDLEBINOP(Div);
HANDLEBINOP(Rem);
HANDLEBINOP(Add);
HANDLEBINOP(Sub);
HANDLEBINOP(Shl);
HANDLEBINOP(Shr);
HANDLEBINOP(And);
HANDLEBINOP(Xor);
HANDLEBINOP(Or);
#undef HANDLEBINOP
// Comparisons.
Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
unsigned SICmpOpc, unsigned FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
Value *VisitBin##CODE(const BinaryOperator *E) { \
return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
llvm::FCmpInst::FP); }
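// For example, VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT) defines VisitBinLT,
// and EmitCompare then picks 'icmp ult', 'icmp slt', or 'fcmp olt' depending
// on the operand types.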
VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
#undef VISITCOMP
Value *VisitBinAssign (const BinaryOperator *E);
Value *VisitBinLAnd (const BinaryOperator *E);
Value *VisitBinLOr (const BinaryOperator *E);
Value *VisitBinComma (const BinaryOperator *E);
// Other Operators.
Value *VisitBlockExpr(const BlockExpr *BE);
Value *VisitConditionalOperator(const ConditionalOperator *CO);
Value *VisitChooseExpr(ChooseExpr *CE);
Value *VisitVAArgExpr(VAArgExpr *VE);
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
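/// For a floating point operand this is roughly "fcmp une %src, 0.0"; for an
/// integer or pointer operand it is an "icmp ne" against the null value.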
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
if (SrcType->isRealFloatingType()) {
// Compare against 0.0 for fp scalars.
llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
return Builder.CreateFCmpUNE(Src, Zero, "tobool");
}
assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
"Unknown scalar type to convert");
// Because of the type rules of C, we often end up computing a logical value,
// then zero extending it to int, then wanting it as a logical value again.
// Optimize this common case.
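// For example, "int b = (x != 0); ... if (b)" would otherwise emit an icmp,
// a zext of the i1 result to i32, and then another icmp of that against zero;
// here we can usually just reuse the original i1.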
if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
Value *Result = ZI->getOperand(0);
// If there aren't any more uses, zap the instruction to save space.
// Note that there can be more uses, for example if this
// is the result of an assignment.
if (ZI->use_empty())
ZI->eraseFromParent();
return Result;
}
}
// Compare against an integer or pointer null.
llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
return Builder.CreateICmpNE(Src, Zero, "tobool");
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
QualType DstType) {
SrcType = CGF.getContext().getCanonicalType(SrcType);
DstType = CGF.getContext().getCanonicalType(DstType);
if (SrcType == DstType) return Src;
if (DstType->isVoidType()) return 0;
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
const llvm::Type *DstTy = ConvertType(DstType);
// Ignore conversions like int -> uint.
if (Src->getType() == DstTy)
return Src;
// Handle pointer conversions next: pointers can only be converted
// to/from other pointers and integers. Check for pointer types in
// terms of LLVM, as some native types (like Obj-C id) may map to a
// pointer type.
if (isa<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
if (isa<llvm::PointerType>(Src->getType()))
return Builder.CreateBitCast(Src, DstTy, "conv");
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
return Builder.CreateIntToPtr(Src, DstTy, "conv");
}
if (isa<llvm::PointerType>(Src->getType())) {
// Must be a ptr to int cast.
assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
return Builder.CreatePtrToInt(Src, DstTy, "conv");
}
// A scalar can be splatted to an extended vector of the same element type
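// For example, converting a 'float' to a 4-element ext_vector of float emits
// roughly:
//   %tmp   = insertelement <4 x float> undef, float %elt, i32 0
//   %splat = shufflevector <4 x float> %tmp, <4 x float> %tmp,
//                          <4 x i32> zeroinitializer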
if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
// Cast the scalar to element type
QualType EltTy = DstType->getAsExtVectorType()->getElementType();
llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
// Allow bitcast from vector to integer/fp of the same size.
if (isa<llvm::VectorType>(Src->getType()) ||
isa<llvm::VectorType>(DstTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
// Finally, we have the arithmetic types: real int/float.
if (isa<llvm::IntegerType>(Src->getType())) {
bool InputSigned = SrcType->isSignedIntegerType();
if (isa<llvm::IntegerType>(DstTy))
return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
else if (InputSigned)
return Builder.CreateSIToFP(Src, DstTy, "conv");
else
return Builder.CreateUIToFP(Src, DstTy, "conv");
}
assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
if (isa<llvm::IntegerType>(DstTy)) {
if (DstType->isSignedIntegerType())
return Builder.CreateFPToSI(Src, DstTy, "conv");
else
return Builder.CreateFPToUI(Src, DstTy, "conv");
}
assert(DstTy->isFloatingPoint() && "Unknown real conversion");
if (DstTy->getTypeID() < Src->getType()->getTypeID())
return Builder.CreateFPTrunc(Src, DstTy, "conv");
else
return Builder.CreateFPExt(Src, DstTy, "conv");
}
/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination
/// type is an LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
QualType SrcTy, QualType DstTy) {
// Get the source element type.
SrcTy = SrcTy->getAsComplexType()->getElementType();
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstTy->isBooleanType()) {
// Complex != 0 -> (Real != 0) | (Imag != 0)
Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
return Builder.CreateOr(Src.first, Src.second, "tobool");
}
// C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
// the imaginary part of the complex value is discarded and the value of the
// real part is converted according to the conversion rules for the
// corresponding real type.
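// For example, converting a '_Complex double' to 'double' just converts the
// real half of the pair and ignores the imaginary half.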
return EmitScalarConversion(Src.first, SrcTy, DstTy);
}
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
Value *ScalarExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "scalar expression");
if (E->getType()->isVoidType())
return 0;
return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
llvm::SmallVector<llvm::Constant*, 32> indices;
for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
}
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// Emit subscript expressions in rvalue contexts. For most cases, this just
// loads the lvalue formed by the subscript expr. However, we have to be
// careful, because the base of a vector subscript is occasionally an rvalue,
// so we can't get it as an lvalue.
if (!E->getBase()->getType()->isVectorType())
return EmitLoadOfLValue(E);
// Handle the vector case. The base must be a vector, the index must be an
// integer value.
Value *Base = Visit(E->getBase());
Value *Idx = Visit(E->getIdx());
// FIXME: Convert Idx to i32 type.
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
/// also handle things like function to pointer-to-function decay, and array to
/// pointer decay.
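/// For example, 'int a[10]' used where an 'int*' is expected decays to a
/// pointer to the first element, roughly 'getelementptr [10 x i32]* %a,
/// i32 0, i32 0' (the "arraydecay" GEP emitted below).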
Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
const Expr *Op = E->getSubExpr();
// If this is due to array->pointer conversion, emit the array expression as
// an l-value.
if (Op->getType()->isArrayType()) {
// FIXME: For now we assume that all source arrays map to LLVM arrays. This
// will not be true when we add support for VLAs.
Value *V = EmitLValue(Op).getAddress(); // Bitfields can't be arrays.
if (!Op->getType()->isVariableArrayType()) {
assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
->getElementType()) &&
"Expected pointer to array");
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
// The resultant pointer type can be implicitly casted to other pointer
// types as well (e.g. void*) and can be implicitly converted to integer.
const llvm::Type *DestTy = ConvertType(E->getType());
if (V->getType() != DestTy) {
if (isa<llvm::PointerType>(DestTy))
V = Builder.CreateBitCast(V, DestTy, "ptrconv");
else {
assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
}
}
return V;
} else if (E->getType()->isReferenceType()) {
return EmitLValue(Op).getAddress();
}
return EmitCastExpr(Op, E->getType());
}
// EmitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
// Handle cases where the source is a non-complex type.
if (!CGF.hasAggregateLLVMType(E->getType())) {
Value *Src = Visit(const_cast<Expr*>(E));
// Use EmitScalarConversion to perform the conversion.
return EmitScalarConversion(Src, E->getType(), DestTy);
}
if (E->getType()->isAnyComplexType()) {
// Handle cases where the source is a complex type.
return EmitComplexToScalarConversion(CGF.EmitComplexExpr(E), E->getType(),
DestTy);
}
// Okay, this is a cast from an aggregate. It must be a cast to void. Just
// evaluate the result and return.
CGF.EmitAggExpr(E, 0, false);
return 0;
}
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
return CGF.EmitCompoundStmt(*E->getSubStmt(),
!E->getType()->isVoidType()).getScalarVal();
}
//===----------------------------------------------------------------------===//
// Unary Operators
//===----------------------------------------------------------------------===//
Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
bool isInc, bool isPre) {
LValue LV = EmitLValue(E->getSubExpr());
// FIXME: Handle volatile!
Value *InVal = CGF.EmitLoadOfLValue(LV, // false
E->getSubExpr()->getType()).getScalarVal();
int AmountVal = isInc ? 1 : -1;
Value *NextVal;
if (isa<llvm::PointerType>(InVal->getType())) {
// FIXME: This isn't right for VLAs.
NextVal = llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
NextVal = Builder.CreateGEP(InVal, NextVal, "ptrincdec");
} else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
// Bool++ is an interesting case, due to promotion rules, we get:
// Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
// Bool = ((int)Bool+1) != 0
// An interesting aspect of this is that increment is always true.
// Decrement does not have this property.
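// For example, '_Bool b; ...; b++;' always ends up storing true, because
// ((int)b + 1) != 0 for either value of b.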
NextVal = llvm::ConstantInt::getTrue();
} else {
// Add the inc/dec to the real part.
if (isa<llvm::IntegerType>(InVal->getType()))
NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
else if (InVal->getType() == llvm::Type::FloatTy)
NextVal = llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
else if (InVal->getType() == llvm::Type::DoubleTy)
NextVal = llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
else {
llvm::APFloat F(static_cast<float>(AmountVal));
bool ignored;
F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
&ignored);
NextVal = llvm::ConstantFP::get(F);
}
NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
}
// Store the updated result through the lvalue.
CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV,
E->getSubExpr()->getType());
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
return isPre ? NextVal : InVal;
}
Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
Value *Op = Visit(E->getSubExpr());
return Builder.CreateNeg(Op, "neg");
}
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
Value *Op = Visit(E->getSubExpr());
return Builder.CreateNot(Op, "neg");
}
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Compare operand to zero.
Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
// Invert value.
// TODO: Could dynamically modify easy computations here. For example, if
// the operand is an icmp ne, turn into icmp eq.
BoolVal = Builder.CreateNot(BoolVal, "lnot");
// ZExt result to int.
return Builder.CreateZExt(BoolVal, CGF.LLVMIntTy, "lnot.ext");
}
/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
QualType TypeToSize = E->getTypeOfArgument();
if (E->isSizeOf()) {
if (const VariableArrayType *VAT =
CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
CGF.EmitVLASize(TypeToSize);
}
return CGF.GetVLASize(VAT);
}
}
// If this isn't sizeof(vla), the result must be constant; use the
// constant folding logic so we don't have to duplicate it here.
Expr::EvalResult Result;
E->Evaluate(Result, CGF.getContext());
return llvm::ConstantInt::get(Result.Val.getInt());
}
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
if (Op->getType()->isAnyComplexType())
return CGF.EmitComplexExpr(Op).first;
return Visit(Op);
}
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
if (Op->getType()->isAnyComplexType())
return CGF.EmitComplexExpr(Op).second;
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated.
CGF.EmitScalarExpr(Op);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
{
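// For example, for '__builtin_offsetof(struct S, a[2].f)' the loop below walks
// the synthesized member/subscript chain toward its CompoundLiteralExpr base,
// adding the byte offset of each named field and index * sizeof(element) for
// each array subscript.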
const Expr* SubExpr = E->getSubExpr();
const llvm::Type* ResultType = ConvertType(E->getType());
llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
while (!isa<CompoundLiteralExpr>(SubExpr)) {
if (const MemberExpr *ME = dyn_cast<MemberExpr>(SubExpr)) {
SubExpr = ME->getBase();
QualType Ty = SubExpr->getType();
RecordDecl *RD = Ty->getAsRecordType()->getDecl();
const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
// FIXME: This is linear time. And the fact that we're indexing
// into the layout by position in the record means that we're
// either stuck numbering the fields in the AST or we have to keep
// the linear search (yuck and yuck).
unsigned i = 0;
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
Field != FieldEnd; (void)++Field, ++i) {
if (*Field == FD)
break;
}
llvm::Value* Offset =
llvm::ConstantInt::get(ResultType, RL.getFieldOffset(i) / 8);
Result = Builder.CreateAdd(Result, Offset);
} else if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(SubExpr)) {
SubExpr = ASE->getBase();
int64_t size = CGF.getContext().getTypeSize(ASE->getType()) / 8;
llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, size);
llvm::Value* ElemIndex = CGF.EmitScalarExpr(ASE->getIdx());
bool IndexSigned = ASE->getIdx()->getType()->isSignedIntegerType();
ElemIndex = Builder.CreateIntCast(ElemIndex, ResultType, IndexSigned);
llvm::Value* Offset = Builder.CreateMul(ElemSize, ElemIndex);
Result = Builder.CreateAdd(Result, Offset);
} else {
assert(0 && "This should be impossible!");
}
}
return Result;
}
//===----------------------------------------------------------------------===//
// Binary Operators
//===----------------------------------------------------------------------===//
BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
BinOpInfo Result;
Result.LHS = Visit(E->getLHS());
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.E = E;
return Result;
}
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
BinOpInfo OpInfo;
// Load the LHS and RHS operands.
LValue LHSLV = EmitLValue(E->getLHS());
OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
// Determine the computation type. If the RHS is complex, then this is one of
// the add/sub/mul/div operators. All of these operators can be computed
// with just their real component even though the computation domain really is
// complex.
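// For example, given 'float f; _Complex float c;', the statement 'f += c;'
// only loads the real component of 'c' and adds it to 'f'.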
QualType ComputeType = E->getComputationType();
// If the computation type is complex, then the RHS is complex. Emit the RHS.
if (const ComplexType *CT = ComputeType->getAsComplexType()) {
ComputeType = CT->getElementType();
// Emit the RHS, only keeping the real component.
OpInfo.RHS = CGF.EmitComplexExpr(E->getRHS()).first;
RHSTy = RHSTy->getAsComplexType()->getElementType();
} else {
// Otherwise the RHS is a simple scalar value.
OpInfo.RHS = Visit(E->getRHS());
}
QualType LComputeTy, RComputeTy, ResultTy;
// Compound assignment does not contain enough information about all
// the types involved for pointer arithmetic cases. Figure it out
// here for now.
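// For example, 'p += i' for 'int *p' is computed in the pointer type (via
// EmitAdd's GEP path), while the GNU 'p -= q' extension computes the pointer
// difference in ptrdiff_t and converts it back to the LHS type at the end.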
if (E->getLHS()->getType()->isPointerType()) {
// Pointer arithmetic cases: ptr +=,-= int and ptr -= ptr,
assert((E->getOpcode() == BinaryOperator::AddAssign ||
E->getOpcode() == BinaryOperator::SubAssign) &&
"Invalid compound assignment operator on pointer type.");
LComputeTy = E->getLHS()->getType();
if (E->getRHS()->getType()->isPointerType()) {
// Degenerate case of (ptr -= ptr) allowed by GCC implicit cast
// extension, the conversion from the pointer difference back to
// the LHS type is handled at the end.
assert(E->getOpcode() == BinaryOperator::SubAssign &&
"Invalid compound assignment operator on pointer type.");
RComputeTy = E->getLHS()->getType();
ResultTy = CGF.getContext().getPointerDiffType();
} else {
RComputeTy = E->getRHS()->getType();
ResultTy = LComputeTy;
}
} else if (E->getRHS()->getType()->isPointerType()) {
// Degenerate case of (int += ptr) allowed by GCC implicit cast
// extension.
assert(E->getOpcode() == BinaryOperator::AddAssign &&
"Invalid compound assignment operator on pointer type.");
LComputeTy = E->getLHS()->getType();
RComputeTy = E->getRHS()->getType();
ResultTy = RComputeTy;
} else {
LComputeTy = RComputeTy = ResultTy = ComputeType;
}
// Convert the LHS/RHS values to the computation type.
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, LComputeTy);
OpInfo.RHS = EmitScalarConversion(OpInfo.RHS, RHSTy, RComputeTy);
OpInfo.Ty = ResultTy;
OpInfo.E = E;
// Expand the binary operator.
Value *Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, ResultTy, LHSTy);
// Store the result value into the LHS lvalue. Bit-fields are
// handled specially because the result is altered by the store,
// i.e., [C99 6.5.16p1] 'An assignment expression has the value of
// the left operand after the assignment...'.
if (LHSLV.isBitfield())
CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
&Result);
else
CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
return Result;
}
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.LHS->getType()->isFPOrFPVector())
return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
else if (Ops.Ty->isUnsignedIntegerType())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
if (Ops.Ty->isUnsignedIntegerType())
return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
else
return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (!Ops.Ty->isPointerType())
return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
// FIXME: What about a pointer to a VLA?
Value *Ptr, *Idx;
Expr *IdxExp;
const PointerType *PT;
if ((PT = Ops.E->getLHS()->getType()->getAsPointerType())) {
Ptr = Ops.LHS;
Idx = Ops.RHS;
IdxExp = Ops.E->getRHS();
} else { // int + pointer
PT = Ops.E->getRHS()->getType()->getAsPointerType();
assert(PT && "Invalid add expr");
Ptr = Ops.RHS;
Idx = Ops.LHS;
IdxExp = Ops.E->getLHS();
}
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the index value based on whether the index type is
// signed or not.
const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
if (IdxExp->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
}
// Explicitly handle GNU void* and function pointer arithmetic
// extensions. The GNU void* casts amount to no-ops since our void*
// type is i8*, but this is future proof.
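// For example, 'void *p; ... p + 1' advances the pointer by a single byte:
// the pointer is bitcast to i8* and a GEP with index 1 is emitted.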
const QualType ElementType = PT->getPointeeType();
if (ElementType->isVoidType() || ElementType->isFunctionType()) {
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
Value *Res = Builder.CreateGEP(Casted, Idx, "sub.ptr");
return Builder.CreateBitCast(Res, Ptr->getType());
}
return Builder.CreateGEP(Ptr, Idx, "add.ptr");
}
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (!isa<llvm::PointerType>(Ops.LHS->getType()))
return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
const QualType LHSType = Ops.E->getLHS()->getType();
const QualType LHSElementType = LHSType->getAsPointerType()->getPointeeType();
if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
// pointer - int
Value *Idx = Ops.RHS;
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the index value based on whether the index type is
// signed or not.
const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
if (Ops.E->getRHS()->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
}
Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
// FIXME: The pointer could point to a VLA.
// Explicitly handle GNU void* and function pointer arithmetic
// extensions. The GNU void* casts amount to no-ops since our
// void* type is i8*, but this is future proof.
if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
return Builder.CreateBitCast(Res, Ops.LHS->getType());
}
return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
}
// pointer - pointer
Value *LHS = Ops.LHS;
Value *RHS = Ops.RHS;
uint64_t ElementSize;
// Handle GCC extension for pointer arithmetic on void* and function pointer
// types.
if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
ElementSize = 1;
} else {
ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
}
const llvm::Type *ResultType = ConvertType(Ops.Ty);
LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
// Optimize out the shift for element size of 1.
if (ElementSize == 1)
return BytesBetween;
// HACK: LLVM doesn't have a divide instruction that 'knows' there is no
// remainder. As such, we handle common power-of-two cases here to generate
// better code. See PR2247.
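// For example, subtracting two 'int*' values (ElementSize == 4) emits an
// 'ashr' of the byte difference by 2 instead of an sdiv.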
if (llvm::isPowerOf2_64(ElementSize)) {
Value *ShAmt =
llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
}
// Otherwise, do a full sdiv.
Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");