Handle negative strides much more efficiently. This compiles X86/lsr-negative-stride.ll
into:

_t:
        movl 8(%esp), %ecx
        movl 4(%esp), %eax
        cmpl %ecx, %eax
        je LBB1_3       #bb17
LBB1_1: #bb
        cmpl %ecx, %eax
        jg LBB1_4       #cond_true
LBB1_2: #cond_false
        subl %eax, %ecx
        cmpl %ecx, %eax
        jne LBB1_1      #bb
LBB1_3: #bb17
        ret
LBB1_4: #cond_true
        subl %ecx, %eax
        cmpl %ecx, %eax
        jne LBB1_1      #bb
        jmp LBB1_3      #bb17

instead of:

_t:
        subl $4, %esp
        movl %esi, (%esp)
        movl 12(%esp), %ecx
        movl 8(%esp), %eax
        cmpl %ecx, %eax
        je LBB1_4       #bb17
LBB1_1: #bb.outer
        movl %ecx, %edx
        negl %edx
LBB1_2: #bb
        cmpl %ecx, %eax
        jle LBB1_5      #cond_false
LBB1_3: #cond_true
        addl %edx, %eax
        cmpl %ecx, %eax
        jne LBB1_2      #bb
LBB1_4: #bb17
        movl (%esp), %esi
        addl $4, %esp
        ret
LBB1_5: #cond_false
        movl %ecx, %edx
        subl %eax, %edx
        movl %eax, %esi
        addl %esi, %esi
        cmpl %ecx, %esi
        je LBB1_4       #bb17
LBB1_6: #cond_false.bb.outer_crit_edge
        movl %edx, %ecx
        jmp LBB1_1      #bb.outer

llvm-svn: 37252
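For reference, here is a minimal C sketch of the kind of source loop the test plausibly represents; the C form is an assumption inferred from the control flow visible in the assembly (a subtraction-based GCD), and only the symbol _t and the cond_true/cond_false block names come from the listings above:

    /* Hypothetical C equivalent of lsr-negative-stride.ll (an assumption
     * based on the branch structure above): GCD by repeated subtraction.
     * The a -= b arm is the induction update with a negative stride. */
    int t(int a, int b) {
        while (a != b) {     /* loop exit test -> bb17 */
            if (a > b)       /* cond_true */
                a -= b;      /* negative-stride update */
            else             /* cond_false */
                b -= a;
        }
        return a;
    }

In the old code the stride is negated into %edx in a separate bb.outer block and %esi has to be saved and restored; the new code performs the subtractions directly, eliminating the outer block and the extra register traffic.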