Oct 28, 2006
-
Chris Lattner authored
llvm-svn: 31262
-
Jim Laskey authored
llvm-svn: 31261
-
Chris Lattner authored
being inserted on unsplit critical edges, which introduces (sometimes large amounts of) partially dead spill code. This also fixes PR925 and CodeGen/Generic/switch-crit-edge-constant.ll.
llvm-svn: 31260
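(Illustrative sketch, not from the commit: a minimal critical edge in LLVM IR, with hypothetical function and block names.)

    ; The edge %entry -> %merge is critical: %entry has two successors
    ; and %merge has two predecessors. A spill or copy placed in %entry
    ; for the phi in %merge also runs when %entry branches to %side,
    ; where it is dead; splitting the edge gives that code a block of
    ; its own on the %entry -> %merge path only.
    define i32 @example(i1 %c) {
    entry:
      br i1 %c, label %side, label %merge
    side:
      br label %merge
    merge:
      %v = phi i32 [ 0, %entry ], [ 1, %side ]
      ret i32 %v
    }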
-
Chris Lattner authored
llvm-svn: 31259
-
Chris Lattner authored
llvm-svn: 31258
-
Chris Lattner authored
llvm-svn: 31257
-
Chris Lattner authored
llvm-svn: 31256
-
Chris Lattner authored
llvm-svn: 31255
-
Chris Lattner authored
llvm-svn: 31254
-
Evan Cheng authored
llvm-svn: 31253
-
Evan Cheng authored
llvm-svn: 31252
-
Nick Lewycky authored
Clarify the test.
llvm-svn: 31251
-
Jim Laskey authored
llvm-svn: 31250
-
Chris Lattner authored
llvm-svn: 31249
-
Chris Lattner authored
llvm-svn: 31248
-
Chris Lattner authored
llvm-svn: 31247
-
Chris Lattner authored
llvm-svn: 31246
-
Chris Lattner authored
llvm-svn: 31245
-
Reid Spencer authored
llvm-svn: 31244
-
Jim Laskey authored
llvm-svn: 31243
-
Chris Lattner authored
Add many fewer CFG edges and PHI node entries. If a switch has the same block as multiple destinations, only add that block once as a successor/phi-node entry (in the jumptable case).
llvm-svn: 31242
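(Illustrative sketch, not from the commit: a switch with duplicate destinations in LLVM IR; the function and block names are hypothetical.)

    ; Three of the four edges target %common. Per the message above, the
    ; jump-table lowering now records %common only once as a successor
    ; and phi-node predecessor, rather than once per case.
    define i32 @example(i32 %x) {
    entry:
      switch i32 %x, label %common [ i32 0, label %common
                                     i32 1, label %common
                                     i32 2, label %other ]
    common:
      ret i32 1
    other:
      ret i32 2
    }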
-
Chris Lattner authored
llvm-svn: 31241
-
Jim Laskey authored
llvm-svn: 31240
-
Devang Patel authored
llvm-svn: 31239
-
Jim Laskey authored
llvm-svn: 31238
-
Devang Patel authored
llvm-svn: 31237
-
Chris Lattner authored
llvm-svn: 31236
-
Chris Lattner authored
        cmpw cr0, r7, r3
        ble cr0, LBB1_5 ;bb25
LBB1_8: ;bb17
        cmpw cr0, r8, r5
        bgt cr0, LBB1_2 ;bb

which is just as good as crnand.
llvm-svn: 31235
-
Oct 27, 2006
-
Devang Patel authored
llvm-svn: 31234
-
Chris Lattner authored
llvm-svn: 31233
-
Chris Lattner authored
        cmpwi cr0, r4, 4
        bgt cr0, LBB1_2 ;UnifiedReturnBlock
LBB1_3: ;entry
        cmplwi cr0, r3, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

instead of:

        cmpwi cr7, r4, 4
        mfcr r2
        addic r4, r3, -1
        subfe r3, r4, r3
        rlwinm r2, r2, 30, 31, 31
        or r2, r2, r3
        cmplwi cr0, r2, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock
LBB1_1: ;cond_true

llvm-svn: 31232
-
Chris Lattner authored
llvm-svn: 31231
-
Chris Lattner authored
This compiles Regression/CodeGen/X86/or-branch.ll into:

_foo:
        subl $12, %esp
        call L_bar$stub
        movl 20(%esp), %eax
        movl 16(%esp), %ecx
        cmpl $5, %eax
        jl LBB1_1 #cond_true
LBB1_3: #entry
        testl %ecx, %ecx
        jne LBB1_2 #UnifiedReturnBlock
LBB1_1: #cond_true
        call L_bar$stub
        addl $12, %esp
        ret
LBB1_2: #UnifiedReturnBlock
        addl $12, %esp
        ret

instead of:

_foo:
        subl $12, %esp
        call L_bar$stub
        movl 20(%esp), %eax
        movl 16(%esp), %ecx
        cmpl $4, %eax
        setg %al
        testl %ecx, %ecx
        setne %cl
        testb %cl, %al
        jne LBB1_2 #UnifiedReturnBlock
LBB1_1: #cond_true
        call L_bar$stub
        addl $12, %esp
        ret
LBB1_2: #UnifiedReturnBlock
        addl $12, %esp
        ret

And on ppc to:

        cmpwi cr0, r29, 5
        blt cr0, LBB1_1 ;cond_true
LBB1_3: ;entry
        cmplwi cr0, r30, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

instead of:

        cmpwi cr7, r4, 4
        mfcr r2
        addic r4, r3, -1
        subfe r30, r4, r3
        rlwinm r29, r2, 30, 31, 31
        and r2, r29, r30
        cmplwi cr0, r2, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

llvm-svn: 31230
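(Illustrative reconstruction, not the actual test file: judging from the assembly above, or-branch.ll is roughly a branch on a disjunction of two compares, written here in modern LLVM IR syntax with hypothetical names.)

    ; With this change, codegen branches on each compare directly instead
    ; of materializing both setcc results with setg/setne and testing the
    ; and/or of the two booleans.
    define void @foo(i32 %x, i32 %y) {
    entry:
      call void @bar()
      %lt = icmp slt i32 %x, 5
      %eq = icmp eq i32 %y, 0
      %or = or i1 %lt, %eq
      br i1 %or, label %cond_true, label %UnifiedReturnBlock
    cond_true:
      call void @bar()
      ret void
    UnifiedReturnBlock:
      ret void
    }
    declare void @bar()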
-
Chris Lattner authored
llvm-svn: 31229
-
Evan Cheng authored
llvm-svn: 31228
-
Evan Cheng authored
llvm-svn: 31227
-
Bill Wendling authored
llvm-svn: 31226
-
Bill Wendling authored
llvm-svn: 31225
-
Jim Laskey authored
llvm-svn: 31224
-
Jim Laskey authored
llvm-svn: 31223
-