- Jun 29, 2010
- Duncan Sands authored
to unsigned only to extend back to a pointer-sized value on the next line. llvm-svn: 107139
- Duncan Sands authored
ignored! Remove it. llvm-svn: 107138
- Duncan Sands authored
but then not actually used - maybe a bug? Remove the variable. llvm-svn: 107137
- Duncan Sands authored
llvm-svn: 107136
- Duncan Sands authored
llvm-svn: 107135
- Duncan Sands authored
llvm-svn: 107134
- Duncan Sands authored
and thumb_mode. llvm-svn: 107133
- Gabor Greif authored
llvm-svn: 107132
- Duncan Sands authored
llvm-svn: 107131
- Duncan Sands authored
llvm-svn: 107130
- Gabor Greif authored
in terms of Op<> and ArgOffset. This works for values of {0, 1} for ArgOffset. Please note that ArgOffset will become 0 soon and will go away eventually. llvm-svn: 107129
- Duncan Sands authored
llvm-svn: 107128
- Duncan Sands authored
llvm-svn: 107127
- Duncan Sands authored
llvm-svn: 107126
- Benjamin Kramer authored
llvm-svn: 107125
- Chandler Carruth authored
be called. llvm-svn: 107124
- Chris Lattner authored
avoid passing ASTContext down through all the methods it has. When classifying an argument, or argument piece, as INTEGER, check to see if we have a pointer at exactly the same offset in the preferred type. If so, use that pointer type instead of i64. This allows us to compile a function taking a StringRef into something like this:

    define i8* @foo(i64 %D.coerce0, i8* %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=4]
      %0 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      store i64 %D.coerce0, i64* %0
      %1 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      store i8* %D.coerce1, i8** %1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

instead of this:

    define i8* @foo(i64 %D.coerce0, i64 %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
      %0 = insertvalue %0 undef, i64 %D.coerce0, 0 ; <%0> [#uses=1]
      %1 = insertvalue %0 %0, i64 %D.coerce1, 1 ; <%0> [#uses=1]
      %2 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
      store %0 %1, %0* %2, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

This implements rdar://7375902 - [codegen quality] clang x86-64 ABI lowering code punishing StringRef

llvm-svn: 107123
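For reference, here is a plausible C++ source for the IR above. This is a hypothetical reconstruction (the log entry is truncated and does not quote the source), but it matches the field layout of %struct.DeclGroup and the pointer arithmetic visible in the IR, and mirrors the DeclGroup test case quoted under llvm-svn 107099 later in this log:

    // Hypothetical source sketch; field names taken from the
    // %struct.DeclGroup fields in the IR above.
    struct DeclGroup {
      long NumDecls; // offset 0: classified INTEGER, passed as i64 %D.coerce0
      char *Y;       // offset 8: a pointer at this offset, so now passed as
                     // i8* %D.coerce1 instead of a second i64
    };

    char *foo(DeclGroup D) {
      return D.NumDecls + D.Y; // long + char* yields char*, as in %add.ptr
    }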
- Evan Cheng authored
llvm-svn: 107122
- Evan Cheng authored
llvm-svn: 107121
- Chris Lattner authored
llvm-svn: 107120
- Jim Grosbach authored
back-edges), make sure not to include dbg_value instructions in the count. Closing in on the end of rdar://7797940 llvm-svn: 107119
- Greg Clayton authored
the private object back to another internal function. llvm-svn: 107118
- Dan Gohman authored
instruction to an add scev, it's not safe to blindly transfer the inbounds flag from a gep instruction to an nsw on the scev for the gep. llvm-svn: 107117
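To illustrate the distinction (a hypothetical sketch, not taken from the commit; the rationale in the comments is my own reading of the flag semantics): inbounds constrains where the resulting pointer may point, while nsw is a promise about signed integer arithmetic, and the first does not imply the second.

    #include <cstdint>

    // A gep such as "getelementptr inbounds i8* %p, i64 %i" only promises
    // that the resulting address stays within %p's underlying object.
    // SCEV models the address as (ptrtoint %p) + %i; if the object happened
    // to straddle the signed midpoint of the address space, that integer
    // add would signed-overflow even though the gep is inbounds, so the
    // add scev cannot blindly be tagged nsw.
    char *advance(char *p, std::int64_t i) {
      return p + i; // inbounds pointer arithmetic, not an nsw integer add
    }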
- Bruno Cardoso Lopes authored
llvm-svn: 107116
- Chris Lattner authored
no functionality change. llvm-svn: 107115
- Jakob Stoklund Olesen authored
llvm-svn: 107114
- Bill Wendling authored
llvm-svn: 107112
- Chris Lattner authored
It is somewhat annoying to do this at this level, but it avoids having ABIInfo depend on CodeGenTypes for a hint. Nothing is using this yet, so no functionality change. llvm-svn: 107111
- Bob Wilson authored
There are 2 changes relative to the previous version of the patch:

1) For the "simple" if-conversion case, there's no need to worry about RemoveExtraEdges not handling an unanalyzable branch. Predicated terminators are ignored in this context, so RemoveExtraEdges does the right thing. This might break someday if we ever treat indirect branches (BRIND) as predicable, but for now, I just removed this part of the patch, because in the case where we do not add an unconditional branch, we rely on keeping the fall-through edge to CvtBBI (which is empty after this transformation). The change relative to the previous patch is:

    @@ -1036,10 +1036,6 @@
           IterIfcvt = false;
         }
    -    // RemoveExtraEdges won't work if the block has an unanalyzable branch,
    -    // which is typically the case for IfConvertSimple, so explicitly remove
    -    // CvtBBI as a successor.
    -    BBI.BB->removeSuccessor(CvtBBI->BB);
         RemoveExtraEdges(BBI);

         // Update block info. BB can be iteratively if-converted.

2) My patch exposed a bug in the code for merging the tail of a "diamond", which had previously never been exercised. The code was simply checking that the tail had a single predecessor, but there was a case in MultiSource/Benchmarks/VersaBench/dbms where that single predecessor was neither edge of the diamond. I added the following change to check for that:

    @@ -1276,7 +1276,18 @@
         // tail, add a unconditional branch to it.
         if (TailBB) {
           BBInfo TailBBI = BBAnalysis[TailBB->getNumber()];
    -      if (TailBB->pred_size() == 1 && !TailBBI.HasFallThrough) {
    +      bool CanMergeTail = !TailBBI.HasFallThrough;
    +      // There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
    +      // check if there are any other predecessors besides those.
    +      unsigned NumPreds = TailBB->pred_size();
    +      if (NumPreds > 1)
    +        CanMergeTail = false;
    +      else if (NumPreds == 1 && CanMergeTail) {
    +        MachineBasicBlock::pred_iterator PI = TailBB->pred_begin();
    +        if (*PI != BBI1->BB && *PI != BBI2->BB)
    +          CanMergeTail = false;
    +      }
    +      if (CanMergeTail) {
           MergeBlocks(BBI, TailBBI);
           TailBBI.IsDone = true;
         } else {

With these fixes, I was able to run all the SingleSource and MultiSource tests successfully.

llvm-svn: 107110
- Dan Gohman authored
properly handles instructions and arguments defined in different functions, or across recursive function iterations. llvm-svn: 107109
- Bruno Cardoso Lopes authored
llvm-svn: 107108
- Bob Wilson authored
the same as ARM except that the condition code field is always set to ARMCC::AL. llvm-svn: 107107
- Chandler Carruth authored
without it we might exit a non-void function without returning. llvm-svn: 107106
- Chris Lattner authored
llvm-svn: 107105
- Chris Lattner authored
avoiding making the FCA at all when the types exactly line up. For example, before we made:

    %struct.DeclGroup = type { i64, i64 }

    define i64 @_Z3foo9DeclGroup(i64, i64) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
      %2 = insertvalue %struct.DeclGroup undef, i64 %0, 0 ; <%struct.DeclGroup> [#uses=1]
      %3 = insertvalue %struct.DeclGroup %2, i64 %1, 1 ; <%struct.DeclGroup> [#uses=1]
      store %struct.DeclGroup %3, %struct.DeclGroup* %D
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
      %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
      %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
      ret i64 %add
    }

... which has the pointless insertvalue, which fastisel hates. Now we make:

    %struct.DeclGroup = type { i64, i64 }

    define i64 @_Z3foo9DeclGroup(i64, i64) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=4]
      %2 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      store i64 %0, i64* %2
      %3 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
      store i64 %1, i64* %3
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
      %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
      %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
      ret i64 %add
    }

This only kicks in when x86-64 ABI lowering decides it likes us.

llvm-svn: 107104
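A plausible source for this example (hypothetical; the log entry is truncated, but by analogy with the DeclGroup test case quoted under llvm-svn 107099 below, here with both fields integral so the coerced types line up exactly):

    // Hypothetical source sketch; both members lower to i64, so the
    // coerced {i64, i64} argument matches the struct field-for-field
    // and codegen can store each scalar directly, with no insertvalue.
    struct DeclGroup {
      long NumDecls;
      long Y;
    };

    long foo(DeclGroup D) {
      return D.NumDecls + D.Y; // corresponds to the add nsw i64 in the IR
    }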
- Devang Patel authored
llvm-svn: 107103
- Craig Silverstein authored
TraverseConstructorInitializer, to be a bit clearer. llvm-svn: 107102
- Ted Kremenek authored
cxloc::translateSourceLocation() (thus causing all clients of this function to have the same behavior). llvm-svn: 107101
- Greg Clayton authored
Added the ability to dump any file in the global module cache using any of the "image dump" commands. This allows us to dump the .o files that are used with DWARF + .o since they don't belong to the target list for the current target. llvm-svn: 107100
- Chris Lattner authored
is an FCA to pass each of the elements as individual scalars. This produces code that fast isel is less likely to reject and is easier on the optimizers. For example, before we would compile:

    struct DeclGroup {
      long NumDecls;
      char *Y;
    };

    char *foo(DeclGroup D) {
      return D.NumDecls + D.Y;
    }

to:

    %struct.DeclGroup = type { i64, i64 }

    define i64 @_Z3foo9DeclGroup(%struct.DeclGroup) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
      store %struct.DeclGroup %0, %struct.DeclGroup* %D, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
      %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
      %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
      ret i64 %add
    }

Now we get:

    %0 = type { i64, i64 }
    %struct.DeclGroup = type { i64, i8* }

    define i8* @_Z3foo9DeclGroup(i64, i64) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
      %2 = insertvalue %0 undef, i64 %0, 0 ; <%0> [#uses=1]
      %3 = insertvalue %0 %2, i64 %1, 1 ; <%0> [#uses=1]
      %4 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
      store %0 %3, %0* %4, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

Elimination of the FCA inside the function is still to come.

llvm-svn: 107099