- Oct 29, 2006
-
Chris Lattner authored
successors), and make island block movement more general. This compiles CodeGen/X86/2006-04-27-ISelFoldingBug.ll to:

_loadAndRLEsource_no_exit_2E_1_label_2E_0:
        subl $8, %esp
        movl %esi, 4(%esp)
        movl %ebx, (%esp)
        movl 16(%esp), %eax
        movl 12(%esp), %ecx
LBB1_3: #label.0
        movl _last, %edx
        movl %edx, %esi
        incl %esi
        movl %esi, _last
        movl %ecx, %ebx
        # TRUNCATE movb %bl, %bl
        movl _block, %esi
        movb %bl, 1(%esi,%edx)
        cmpl %eax, _last
        jge LBB1_2 #codeRepl5.exitStub
LBB1_4: #label.0
        cmpl $257, %ecx
        je LBB1_2 #codeRepl5.exitStub
LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
        movl $1, %eax
        movl (%esp), %ebx
        movl 4(%esp), %esi
        addl $8, %esp
        ret
LBB1_2: #codeRepl5.exitStub
        xorl %eax, %eax
        movl (%esp), %ebx
        movl 4(%esp), %esi
        addl $8, %esp
        ret

instead of:

_loadAndRLEsource_no_exit_2E_1_label_2E_0:
        subl $8, %esp
        movl %esi, 4(%esp)
        movl %ebx, (%esp)
        movl 16(%esp), %eax
        movl 12(%esp), %ecx
        jmp LBB1_3 #label.0
LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
        movl $1, %eax
        movl (%esp), %ebx
        movl 4(%esp), %esi
        addl $8, %esp
        ret
LBB1_2: #codeRepl5.exitStub
        xorl %eax, %eax
        movl (%esp), %ebx
        movl 4(%esp), %esi
        addl $8, %esp
        ret
LBB1_3: #label.0
        movl _last, %edx
        movl %edx, %esi
        incl %esi
        movl %esi, _last
        movl %ecx, %ebx
        # TRUNCATE movb %bl, %bl
        movl _block, %esi
        movb %bl, 1(%esi,%edx)
        cmpl %eax, _last
        jge LBB1_2 #codeRepl5.exitStub
LBB1_4: #label.0
        cmpl $257, %ecx
        jne LBB1_1 #label.0.no_exit.1_crit_edge.exitStub
        jmp LBB1_2 #codeRepl5.exitStub

... which is much better layout :)
llvm-svn: 31282
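The source for the test is not reproduced in the log. A rough, hypothetical C analogue of the extracted region, inferred from the assembly above (`last` and `block` match the symbols there; the signature and return-value convention are assumed):

    /* Hypothetical reconstruction, not the actual test source. */
    int last;
    unsigned char *block;

    int extracted_region(int c, int limit) {
      int old = last;
      last = old + 1;                     /* incl %esi / movl %esi, _last */
      block[old + 1] = (unsigned char)c;  /* the TRUNCATE + movb store */
      if (last >= limit)
        return 0;                         /* codeRepl5.exitStub */
      if (c == 257)
        return 0;                         /* codeRepl5.exitStub */
      return 1;                           /* no_exit.1 critical-edge stub */
    }

With the improved layout, the entry block falls straight through into label.0, so the hot path contains no unconditional jump.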
-
Chris Lattner authored
llvm-svn: 31281
-
Chris Lattner authored
used by comparisons in the main block. llvm-svn: 31279
-
Jim Laskey authored
llvm-svn: 31278
-
Jim Laskey authored
llvm-svn: 31277
-
Evan Cheng authored
llvm-svn: 31276
-
Nick Lewycky authored
llvm-svn: 31275
-
- Oct 28, 2006
-
Chris Lattner authored
edges whose destinations contain no phi nodes don't bother us. Also, share split edges, since a split edge can't have a phi. This significantly reduces the complexity of the generated code in some cases. llvm-svn: 31274
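For reference, a minimal C sketch (assumed, not from the commit) of where such edges arise:

    /* Sketch only.  The edge from the loop-condition block to `done` is
       critical: the condition block has two successors (body, done) and
       `done` has two predecessors (the goto and the normal loop exit).
       If `done` contains no phi node, splitting the edge can be skipped;
       and several such edges targeting one block can share a single
       split block, since a split block never holds a phi of its own. */
    int sum_until_zero(int n, const int *x) {
      int s = 0;
      for (int i = 0; i < n; i++) {
        if (x[i] == 0)
          goto done;   /* predecessor #1 of done */
        s += x[i];
      }                /* loop exit: predecessor #2 of done */
    done:
      return s;
    }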
-
Chris Lattner authored
jump tables that are dead. llvm-svn: 31273
-
Chris Lattner authored
llvm-svn: 31269
-
Chris Lattner authored
llvm-svn: 31267
-
Chris Lattner authored
llvm-svn: 31266
-
Chris Lattner authored
llvm-svn: 31264
-
Chris Lattner authored
the pred block doesn't fall through into them if it's a jumptable. llvm-svn: 31263
-
Chris Lattner authored
llvm-svn: 31262
-
Jim Laskey authored
llvm-svn: 31261
-
Chris Lattner authored
being inserted on unsplit critical edges, which introduces (sometimes large amounts of) partially dead spill code. This also fixes PR925 + CodeGen/Generic/switch-crit-edge-constant.ll llvm-svn: 31260
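Neither PR925 nor the test's source appears in the log; a hedged C sketch of the failure pattern (names and constants assumed):

    /* Hypothetical sketch, not the actual test.  The `default` edge runs
       straight from the switch block to the merge point; the switch block
       has three successors and the merge point has three predecessors, so
       that edge is critical.  The phi for `v` needs a copy (v = 0) along
       that edge; without splitting it, PHI elimination must place the copy
       in the switch block itself, where it also executes on the paths to
       case 0 and case 1, i.e. partially dead code. */
    int pick(int sel) {
      int v = 0;
      switch (sel) {
      case 0: v = 10; break;
      case 1: v = 20; break;
      }
      return v * 2;  /* merge: v = phi(0, 10, 20) */
    }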
-
Chris Lattner authored
llvm-svn: 31258
-
Chris Lattner authored
llvm-svn: 31257
-
Chris Lattner authored
llvm-svn: 31256
-
Chris Lattner authored
llvm-svn: 31255
-
Chris Lattner authored
llvm-svn: 31254
-
Evan Cheng authored
llvm-svn: 31253
-
Evan Cheng authored
llvm-svn: 31252
-
Chris Lattner authored
llvm-svn: 31249
-
Chris Lattner authored
llvm-svn: 31248
-
Chris Lattner authored
llvm-svn: 31247
-
Chris Lattner authored
llvm-svn: 31246
-
Chris Lattner authored
llvm-svn: 31245
-
Reid Spencer authored
llvm-svn: 31244
-
Jim Laskey authored
llvm-svn: 31243
-
Chris Lattner authored
Add many fewer CFG edges and PHI node entries. If a switch has the same block as multiple destinations, only add that block once as a successor/phi node (in the jumptable case). llvm-svn: 31242
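A minimal C illustration (assumed, not from the commit) of one block serving as several switch destinations:

    /* Sketch only.  Four jump-table entries lead to the same block; with
       this change that block is recorded once as a CFG successor (and
       gets one phi entry) instead of four times. */
    int is_space(int c) {
      switch (c) {
      case ' ': case '\t': case '\n': case '\r':
        return 1;
      default:
        return 0;
      }
    }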
-
Jim Laskey authored
llvm-svn: 31240
-
Chris Lattner authored
llvm-svn: 31236
-
Chris Lattner authored
        cmpw cr0, r7, r3
        ble cr0, LBB1_5 ;bb25
LBB1_8: ;bb17
        cmpw cr0, r8, r5
        bgt cr0, LBB1_2 ;bb

which is just as good as crnand.
llvm-svn: 31235
-
- Oct 27, 2006
-
Chris Lattner authored
llvm-svn: 31233
-
Chris Lattner authored
        cmpwi cr0, r4, 4
        bgt cr0, LBB1_2 ;UnifiedReturnBlock
LBB1_3: ;entry
        cmplwi cr0, r3, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

instead of:

        cmpwi cr7, r4, 4
        mfcr r2
        addic r4, r3, -1
        subfe r3, r4, r3
        rlwinm r2, r2, 30, 31, 31
        or r2, r2, r3
        cmplwi cr0, r2, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock
LBB1_1: ;cond_true

llvm-svn: 31232
-
Chris Lattner authored
This compiles Regression/CodeGen/X86/or-branch.ll into:

_foo:
        subl $12, %esp
        call L_bar$stub
        movl 20(%esp), %eax
        movl 16(%esp), %ecx
        cmpl $5, %eax
        jl LBB1_1 #cond_true
LBB1_3: #entry
        testl %ecx, %ecx
        jne LBB1_2 #UnifiedReturnBlock
LBB1_1: #cond_true
        call L_bar$stub
        addl $12, %esp
        ret
LBB1_2: #UnifiedReturnBlock
        addl $12, %esp
        ret

instead of:

_foo:
        subl $12, %esp
        call L_bar$stub
        movl 20(%esp), %eax
        movl 16(%esp), %ecx
        cmpl $4, %eax
        setg %al
        testl %ecx, %ecx
        setne %cl
        testb %cl, %al
        jne LBB1_2 #UnifiedReturnBlock
LBB1_1: #cond_true
        call L_bar$stub
        addl $12, %esp
        ret
LBB1_2: #UnifiedReturnBlock
        addl $12, %esp
        ret

And on ppc to:

        cmpwi cr0, r29, 5
        blt cr0, LBB1_1 ;cond_true
LBB1_3: ;entry
        cmplwi cr0, r30, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

instead of:

        cmpwi cr7, r4, 4
        mfcr r2
        addic r4, r3, -1
        subfe r30, r4, r3
        rlwinm r29, r2, 30, 31, 31
        and r2, r29, r30
        cmplwi cr0, r2, 0
        bne cr0, LBB1_2 ;UnifiedReturnBlock

llvm-svn: 31230
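The .ll source itself is not shown in the log; a hypothetical C equivalent, inferred from the assembly above (parameter names and order are assumed):

    /* Hypothetical reconstruction of or-branch.ll, not the actual test. */
    extern void bar(void);

    void foo(int x, int y) {
      bar();
      /* With this change each subcondition becomes its own
         compare-and-branch, rather than materializing two setcc
         results, OR'ing them, and branching once on the combined byte. */
      if (x == 0 || y < 5)
        bar();
    }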
-
Evan Cheng authored
llvm-svn: 31228
-
Jim Laskey authored
llvm-svn: 31224
-