Skip to content
  1. Oct 29, 2006
    • Chris Lattner's avatar
      Make CanFallThrough more intelligent (so it can handle blocks with, e.g., no successors), and make island block movement more general. · 504eeda3
      Chris Lattner authored
      
      This compiles CodeGen/X86/2006-04-27-ISelFoldingBug.ll to:
      
      _loadAndRLEsource_no_exit_2E_1_label_2E_0:
              subl $8, %esp
              movl %esi, 4(%esp)
              movl %ebx, (%esp)
              movl 16(%esp), %eax
              movl 12(%esp), %ecx
      LBB1_3: #label.0
              movl _last, %edx
              movl %edx, %esi
              incl %esi
              movl %esi, _last
              movl %ecx, %ebx
              # TRUNCATE movb %bl, %bl
              movl _block, %esi
              movb %bl, 1(%esi,%edx)
              cmpl %eax, _last
              jge LBB1_2      #codeRepl5.exitStub
      LBB1_4: #label.0
              cmpl $257, %ecx
              je LBB1_2       #codeRepl5.exitStub
      LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
              movl $1, %eax
              movl (%esp), %ebx
              movl 4(%esp), %esi
              addl $8, %esp
              ret
      LBB1_2: #codeRepl5.exitStub
              xorl %eax, %eax
              movl (%esp), %ebx
              movl 4(%esp), %esi
              addl $8, %esp
              ret
      
      instead of:
      
      _loadAndRLEsource_no_exit_2E_1_label_2E_0:
              subl $8, %esp
              movl %esi, 4(%esp)
              movl %ebx, (%esp)
              movl 16(%esp), %eax
              movl 12(%esp), %ecx
              jmp LBB1_3      #label.0
      LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
              movl $1, %eax
              movl (%esp), %ebx
              movl 4(%esp), %esi
              addl $8, %esp
              ret
      LBB1_2: #codeRepl5.exitStub
              xorl %eax, %eax
              movl (%esp), %ebx
              movl 4(%esp), %esi
              addl $8, %esp
              ret
      LBB1_3: #label.0
              movl _last, %edx
              movl %edx, %esi
              incl %esi
              movl %esi, _last
              movl %ecx, %ebx
              # TRUNCATE movb %bl, %bl
              movl _block, %esi
              movb %bl, 1(%esi,%edx)
              cmpl %eax, _last
              jge LBB1_2      #codeRepl5.exitStub
      LBB1_4: #label.0
              cmpl $257, %ecx
              jne LBB1_1      #label.0.no_exit.1_crit_edge.exitStub
              jmp LBB1_2      #codeRepl5.exitStub
      
      ... which is much better layout :)
      
      llvm-svn: 31282
      504eeda3
    • Chris Lattner's avatar
      fix Generic/2006-10-29-Crash.ll · e60ae823
      Chris Lattner authored
      llvm-svn: 31281
      e60ae823
    • Chris Lattner's avatar
      Fix a load folding issue that Evan noticed: there is no need to export values used by comparisons in the main block. · f31b9ef4
      Chris Lattner authored
      
      llvm-svn: 31279
      f31b9ef4
    • Evan Cheng's avatar
      VLOAD is not the LoadSDNode opcode. · 7ab6123c
      Evan Cheng authored
      llvm-svn: 31276
      7ab6123c
    • Nick Lewycky's avatar
      Remove spurious case. EXTLOAD is not one of the node opcodes. · dc146a9f
      Nick Lewycky authored
      llvm-svn: 31275
      dc146a9f
  2. Oct 28, 2006
  3. Oct 27, 2006
    • Chris Lattner's avatar
      remove debug code · b9392fb6
      Chris Lattner authored
      llvm-svn: 31233
      b9392fb6
    • Chris Lattner's avatar
      Codegen cond&cond with two branches. This compiles (f.e.) PowerPC/and-branch.ll to: · f1b54fd7
      Chris Lattner authored
              cmpwi cr0, r4, 4
              bgt cr0, LBB1_2 ;UnifiedReturnBlock
      LBB1_3: ;entry
              cmplwi cr0, r3, 0
              bne cr0, LBB1_2 ;UnifiedReturnBlock
      
      instead of:
      
              cmpwi cr7, r4, 4
              mfcr r2
              addic r4, r3, -1
              subfe r3, r4, r3
              rlwinm r2, r2, 30, 31, 31
              or r2, r2, r3
              cmplwi cr0, r2, 0
              bne cr0, LBB1_2 ;UnifiedReturnBlock
      LBB1_1: ;cond_true
      
      llvm-svn: 31232
      f1b54fd7
    • Chris Lattner's avatar
      Turn conditions like x<Y|z==q into multiple blocks. · ed0110b9
      Chris Lattner authored
      This compiles Regression/CodeGen/X86/or-branch.ll into:
      
      _foo:
              subl $12, %esp
              call L_bar$stub
              movl 20(%esp), %eax
              movl 16(%esp), %ecx
              cmpl $5, %eax
              jl LBB1_1       #cond_true
      LBB1_3: #entry
              testl %ecx, %ecx
              jne LBB1_2      #UnifiedReturnBlock
      LBB1_1: #cond_true
              call L_bar$stub
              addl $12, %esp
              ret
      LBB1_2: #UnifiedReturnBlock
              addl $12, %esp
              ret
      
      instead of:
      
      _foo:
              subl $12, %esp
              call L_bar$stub
              movl 20(%esp), %eax
              movl 16(%esp), %ecx
              cmpl $4, %eax
              setg %al
              testl %ecx, %ecx
              setne %cl
              testb %cl, %al
              jne LBB1_2      #UnifiedReturnBlock
      LBB1_1: #cond_true
              call L_bar$stub
              addl $12, %esp
              ret
      LBB1_2: #UnifiedReturnBlock
              addl $12, %esp
              ret
      
      And on ppc to:
      
              cmpwi cr0, r29, 5
              blt cr0, LBB1_1 ;cond_true
      LBB1_3: ;entry
              cmplwi cr0, r30, 0
              bne cr0, LBB1_2 ;UnifiedReturnBlock
      
      instead of:
      
              cmpwi cr7, r4, 4
              mfcr r2
              addic r4, r3, -1
              subfe r30, r4, r3
              rlwinm r29, r2, 30, 31, 31
              and r2, r29, r30
              cmplwi cr0, r2, 0
              bne cr0, LBB1_2 ;UnifiedReturnBlock
      
      llvm-svn: 31230
      ed0110b9
  4. Oct 26, 2006
  5. Oct 25, 2006
  6. Oct 24, 2006
  7. Oct 23, 2006
  8. Oct 22, 2006
Loading