- Sep 22, 2006
  - Evan Cheng authored
    llvm-svn: 30583
  - Rafael Espindola authored
    llvm-svn: 30581
  - Nate Begeman authored
    llvm-svn: 30577
  - Devang Patel authored
    llvm-svn: 30576
  - Devang Patel authored
    call stack depth.
    llvm-svn: 30575

- Sep 21, 2006
  - Evan Cheng authored
    llvm-svn: 30573
  - Jim Laskey authored
    llvm-svn: 30570
  - Jim Laskey authored
    llvm-svn: 30568
  - Rafael Espindola authored
    llvm-svn: 30567
  - Rafael Espindola authored
    llvm-svn: 30566
  - Chris Lattner authored
    llvm-svn: 30561
  - Chris Lattner authored
    llvm-svn: 30560
  - Chris Lattner authored
    llvm-svn: 30559
  - Chris Lattner authored

        int %test(ulong *%tmp) {
                %tmp = load ulong* %tmp                 ; <ulong> [#uses=1]
                %tmp.mask = shr ulong %tmp, ubyte 50    ; <ulong> [#uses=1]
                %tmp.mask = cast ulong %tmp.mask to ubyte
                %tmp2 = and ubyte %tmp.mask, 3          ; <ubyte> [#uses=1]
                %tmp2 = cast ubyte %tmp2 to int         ; <int> [#uses=1]
                ret int %tmp2
        }

    to:

        _test:
                movl 4(%esp), %eax
                movl 4(%eax), %eax
                shrl $18, %eax
                andl $3, %eax
                ret

    instead of:

        _test:
                movl 4(%esp), %eax
                movl 4(%eax), %eax
                shrl $18, %eax
                # TRUNCATE movb %al, %al
                andb $3, %al
                movzbl %al, %eax
                ret

    llvm-svn: 30558
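    (For reference, a hedged C equivalent of the IR above -- my
    reconstruction, not part of the commit. The point is that the
    shift/truncate/mask chain collapses into one 32-bit shift and mask.)

        #include <stdint.h>

        /* Load a 64-bit value, shift right by 50, truncate to a byte,
         * and mask to two bits.  Good codegen keeps the whole
         * computation in one 32-bit register, as in the "to:" output. */
        int test_c(const uint64_t *p) {
            return (int)((*p >> 50) & 3);
        }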
  - Chris Lattner authored

    the src/dst are not the same size. This catches things like
    "truncate 32-bit X to 8 bits, then zext to 16", which happens a
    bit on X86.

    llvm-svn: 30557
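    (A minimal C illustration of that pattern -- my example, not from
    the commit. The intermediate 8-bit truncate can fold away because
    only the low byte survives the zero-extend.)

        #include <stdint.h>

        /* Truncate 32-bit x to 8 bits, then zero-extend to 16 bits.
         * The 8-bit intermediate never needs to exist: the sequence is
         * equivalent to (uint16_t)(x & 0xFF). */
        uint16_t trunc_then_zext(uint32_t x) {
            uint8_t t = (uint8_t)x;   /* truncate */
            return (uint16_t)t;       /* zext */
        }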
  - Chris Lattner authored
    llvm-svn: 30556
  - Chris Lattner authored
    llvm-svn: 30555
  - Nick Lewycky authored
    llvm-svn: 30553
  - Nick Lewycky authored
    llvm-svn: 30552
  - Nick Lewycky authored
    with the right answer.
    llvm-svn: 30550
  - Anton Korobeynikov authored
    llvm-svn: 30549

- Sep 20, 2006
  - Andrew Lenharth authored
    llvm-svn: 30548
  - Chris Lattner authored
    should create the right asmprinter subclass.
    llvm-svn: 30542
  - Chris Lattner authored
    llvm-svn: 30541
  - Nick Lewycky authored
    Fixes infinite loop in resolve().
    llvm-svn: 30540
  - Andrew Lenharth authored
    llvm-svn: 30535
  - Andrew Lenharth authored
    llvm-svn: 30534
  - Andrew Lenharth authored
    llvm-svn: 30531
  - Andrew Lenharth authored
    llvm-svn: 30530
  - Chris Lattner authored
    llvm-svn: 30518
  - Chris Lattner authored

        int test3(int a, int b) { return (a < 0) ? a : 0; }

    to:

        _test3:
                srawi r2, r3, 31
                and r3, r2, r3
                blr

    instead of:

        _test3:
                cmpwi cr0, r3, 1
                li r2, 0
                blt cr0, LBB2_2 ;entry
        LBB2_1: ;entry
                mr r3, r2
        LBB2_2: ;entry
                blr

    This implements: PowerPC/select_lt0.ll:seli32_a_a

    llvm-svn: 30517
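    (Why the branchless form works -- my note, not from the commit: an
    arithmetic shift right by 31 broadcasts the sign bit into a full
    mask, which is exactly what the srawi/and pair computes.)

        #include <stdint.h>

        /* (a < 0) ? a : 0 without a branch.  a >> 31 is all-ones when
         * a is negative and zero otherwise (arithmetic shift, as on
         * PowerPC), so ANDing it with a selects a or 0. */
        int32_t select_lt0(int32_t a) {
            int32_t mask = a >> 31;   /* -1 if a < 0, else 0 */
            return mask & a;
        }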
  - Chris Lattner authored
    llvm-svn: 30515
  - Chris Lattner authored
    llvm-svn: 30514
  - Chris Lattner authored

    1. teach SimplifySetCC that '(srl (ctlz x), 5) == 0' is really
       x != 0.
    2. Teach visitSELECT_CC to use SimplifySetCC instead of calling it
       and ignoring the result.

    This allows us to compile:

        bool %test(ulong %x) {
          %tmp = setlt ulong %x, 4294967296
          ret bool %tmp
        }

    to:

        _test:
                cntlzw r2, r3
                cmplwi cr0, r3, 1
                srwi r2, r2, 5
                li r3, 0
                beq cr0, LBB1_2 ;
        LBB1_1: ;
                mr r3, r2
        LBB1_2: ;
                blr

    instead of:

        _test:
                addi r2, r3, -1
                cntlzw r2, r2
                cntlzw r3, r3
                srwi r2, r2, 5
                cmplwi cr0, r2, 0
                srwi r2, r3, 5
                li r3, 0
                bne cr0, LBB1_2 ;
        LBB1_1: ;
                mr r3, r2
        LBB1_2: ;
                blr

    This isn't wonderful, but it's an improvement.

    llvm-svn: 30513
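    (Why the fold in point 1 is sound -- my note, not from the commit:
    ctlz of a 32-bit value lies in [0, 32] and equals 32 only for
    x == 0, so bit 5 of the count is set exactly when x is zero. A
    hedged C sketch with a cntlzw-style helper where clz32(0) == 32:)

        #include <stdint.h>

        /* cntlzw-style count-leading-zeros: returns 32 for input 0. */
        static unsigned clz32(uint32_t x) {
            unsigned n = 0;
            while (n < 32 && !(x & (0x80000000u >> n)))
                n++;
            return n;
        }

        /* (clz32(x) >> 5) is 1 iff clz32(x) == 32 iff x == 0, so
         * '(srl (ctlz x), 5) == 0' is just 'x != 0'. */
        int is_nonzero(uint32_t x) {
            return (clz32(x) >> 5) == 0;
        }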
  - Chris Lattner authored
    llvm-svn: 30512
  - Chris Lattner authored
    this comparison, but never checked it. Whoops, no wonder we
    miscompiled 177.mesa!
    llvm-svn: 30511
  - Chris Lattner authored
    llvm-svn: 30510
  - Chris Lattner authored

    1. Codegen this comparison:

           if (X == 0x8000)

       as:

           cmplwi cr0, r3, 32768
           bne cr0, LBB1_2 ;cond_next

       instead of:

           lis r2, 0
           ori r2, r2, 32768
           cmpw cr0, r3, r2
           bne cr0, LBB1_2 ;cond_next

    2. Codegen this comparison:

           if (X == 0x12345678)

       as:

           xoris r2, r3, 4660
           cmplwi cr0, r2, 22136
           bne cr0, LBB1_2 ;cond_next

       instead of:

           lis r2, 4660
           ori r2, r2, 22136
           cmpw cr0, r3, r2
           bne cr0, LBB1_2 ;cond_next

    llvm-svn: 30509
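    (The idea in both cases -- my note, not from the commit -- is to
    make the constant fit cmplwi's 16-bit immediate field: a constant
    whose high half is zero compares directly, and a full 32-bit
    constant is handled by xoris cancelling the high half first. The
    second rewrite is this C identity; my illustration.)

        #include <stdint.h>

        /* x == 0x12345678 rewritten the way xoris/cmplwi does it:
         * XOR away the high 16 bits (0x1234 == 4660), then compare
         * against the low 16 bits (0x5678 == 22136). */
        int eq_const(uint32_t x) {
            return (x ^ 0x12340000u) == 0x5678u;
        }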
  - Chris Lattner authored
    llvm-svn: 30508
  - Chris Lattner authored

    Allow it to be clever when possible and fall back to the gross code
    when needed. This allows us to compile:

        long long foo1(long long X, int C) { return X << (C|32); }
        long long foo2(long long X, int C) { return X << (C&~32); }

    to:

        _foo1:
                rlwinm r2, r5, 0, 27, 31
                slw r3, r4, r2
                li r4, 0
                blr

                .globl _foo2
                .align 4
        _foo2:
                rlwinm r2, r5, 0, 27, 25
                subfic r5, r2, 32
                slw r3, r3, r2
                srw r5, r4, r5
                or r3, r3, r5
                slw r4, r4, r2
                blr

    instead of:

        _foo1:
                ori r2, r5, 32
                subfic r5, r2, 32
                addi r6, r2, -32
                srw r5, r4, r5
                slw r3, r3, r2
                slw r6, r4, r6
                or r3, r3, r5
                slw r4, r4, r2
                or r3, r3, r6
                blr

                .globl _foo2
                .align 4
        _foo2:
                rlwinm r2, r5, 0, 27, 25
                subfic r5, r2, 32
                addi r6, r2, -32
                srw r5, r4, r5
                slw r3, r3, r2
                slw r6, r4, r6
                or r3, r3, r5
                slw r4, r4, r2
                or r3, r3, r6
                blr

    llvm-svn: 30507
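    (Context -- my note, not from the commit: the "gross code" is the
    generic expansion of a 64-bit shift into 32-bit halves. Known bits
    of the shift amount let whole terms drop: C|32 forces the amount
    >= 32, while C&~32 keeps it < 32. A hedged C sketch of the generic
    expansion, amount assumed in [0, 63]:)

        #include <stdint.h>

        /* Generic 64-bit shift-left from 32-bit operations.  When the
         * amount is known >= 32 (foo1's C|32), only the last arm
         * survives: the low word is 0 and the high word is the old low
         * word shifted by c - 32.  When it is known < 32 (foo2's
         * C&~32), the middle arm alone suffices. */
        uint64_t shl64(uint64_t x, unsigned c) {
            uint32_t lo = (uint32_t)x;
            uint32_t hi = (uint32_t)(x >> 32);
            uint32_t out_lo, out_hi;
            if (c == 0) {
                out_lo = lo;
                out_hi = hi;
            } else if (c < 32) {
                out_lo = lo << c;
                out_hi = (hi << c) | (lo >> (32 - c));
            } else {              /* 32 <= c <= 63 */
                out_lo = 0;
                out_hi = lo << (c - 32);
            }
            return ((uint64_t)out_hi << 32) | out_lo;
        }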