- Feb 01, 2006

- Evan Cheng authored
  llvm-svn: 25871
- Jeff Cohen authored
  llvm-svn: 25869
- Chris Lattner authored
  llvm-svn: 25867
- Chris Lattner authored
  Beef up the interface to inline asm constraint parsing, making it more general, useful, and easier to use.
  llvm-svn: 25866
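  (Illustrative aside, not LLVM's actual interface: constraint lists like the "=eax,==ebx,==ecx,==edx,eax" string used in the cpuid example at llvm-svn: 25827 below are comma-separated, with "=" marking an output; reading "==" as an indirect output written through a pointer operand is an assumption drawn from that example. A minimal standalone splitter in C might look like this.)

      /* Hypothetical sketch of splitting an inline asm constraint list.
         Not LLVM's interface; the "==" interpretation is an assumption. */
      #include <stdio.h>
      #include <string.h>

      static void classify(const char *c) {
          const char *kind = "input";
          if (strncmp(c, "==", 2) == 0) { kind = "indirect output"; c += 2; }
          else if (c[0] == '=')         { kind = "output";          c += 1; }
          printf("%-16s %s\n", kind, c);
      }

      int main(void) {
          char buf[] = "=eax,==ebx,==ecx,==edx,eax";
          for (char *tok = strtok(buf, ","); tok; tok = strtok(NULL, ","))
              classify(tok);
          return 0;
      }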
- Chris Lattner authored
  llvm-svn: 25865
- Evan Cheng authored
  value or the chain going into the load.
  llvm-svn: 25863
- Chris Lattner authored
  llvm-svn: 25862
- Evan Cheng authored
  ensure the memory location has not been clobbered.
  llvm-svn: 25861
- Evan Cheng authored
  llvm-svn: 25860
- Evan Cheng authored
  it is already available in memory, do a fld directly from there.
  llvm-svn: 25859

- Jan 31, 2006

- Chris Lattner authored
  llvm-svn: 25858
- Evan Cheng authored
  - Use XORP* to implement fneg.
  llvm-svn: 25857
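  (Illustrative aside, not the commit's code: negating an IEEE-754 float only requires flipping the sign bit, which a packed XOR against a sign-bit mask, i.e. XORPS/XORPD, does in a single instruction. The same bit trick in scalar C terms:)

      /* Flip the IEEE-754 sign bit, which is what XORPS does per 32-bit lane.
         Illustrative sketch only; names here are not from the commit. */
      #include <stdint.h>
      #include <string.h>

      float fneg_via_xor(float x) {
          uint32_t bits;
          memcpy(&bits, &x, sizeof bits);   /* bit-copy, avoiding aliasing issues */
          bits ^= 0x80000000u;              /* sign-bit mask */
          memcpy(&x, &bits, sizeof bits);
          return x;
      }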
- Evan Cheng authored
  llvm-svn: 25856
- Evan Cheng authored
  llvm-svn: 25855
- Chris Lattner authored
  * Rename hasSSE -> hasSSE1 to avoid my continual confusion with 'has any SSE'.
  * Add inline asm constraint specification.
  llvm-svn: 25854
- Chris Lattner authored
  llvm-svn: 25853
- Evan Cheng authored
  caused several test failures.
  llvm-svn: 25852
- Chris Lattner authored
  llvm-svn: 25851
- Nate Begeman authored
      bool %test(int %X) {
          %Y = seteq int %X, 13
          ret bool %Y
      }

  as

      _test:
          addi r2, r3, -13
          cntlzw r2, r2
          srwi r3, r2, 5
          blr

  rather than

      _test:
          cmpwi cr7, r3, 13
          mfcr r2
          rlwinm r3, r2, 31, 31, 31
          blr

  This has very little effect on most code, but speeds up analyzer 23% and mason 11%
  llvm-svn: 25848
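  (Illustrative aside, not part of the commit: cntlzw returns 32 only when its input is zero, so clz(X - 13) >> 5 is 1 exactly when X == 13 and 0 otherwise. The same branch-free idiom in C:)

      /* Branch-free equality test in the spirit of the PowerPC sequence above.
         clz(x - c) is 32 only when x == c; shifting right by 5 maps that to 1. */
      #include <stdint.h>

      static uint32_t clz32(uint32_t v) {
          /* __builtin_clz (GCC/Clang) is undefined for 0, so handle that case. */
          return v ? (uint32_t)__builtin_clz(v) : 32u;
      }

      uint32_t equals_13(uint32_t x) {
          return clz32(x - 13u) >> 5;   /* 1 when x == 13, else 0 */
      }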
- Chris Lattner authored
  llvm-svn: 25847
- Chris Lattner authored
  llvm-svn: 25846
- Chris Lattner authored
  llvm-svn: 25845
- Chris Lattner authored
  llvm-svn: 25844
- Chris Lattner authored
  llvm-svn: 25843
- Chris Lattner authored
  llvm-svn: 25842
- Chris Lattner authored
  llvm-svn: 25841
- Chris Lattner authored
  an operand that contains the condcode), making things significantly simpler.
  llvm-svn: 25840
- Chris Lattner authored
  a CC as an operand. Much smaller, much happier.
  llvm-svn: 25839
- Chris Lattner authored
  llvm-svn: 25838
- Chris Lattner authored
  llvm-svn: 25837
- Chris Lattner authored
  llvm-svn: 25834
- Chris Lattner authored
      void %X(int %A) {
          %C = setlt int %A, 123          ; <bool> [#uses=1]
          br bool %C, label %T, label %F
      T:                                  ; preds = %0
          call int %main( int 0 )         ; <int>:0 [#uses=0]
          ret void
      F:                                  ; preds = %0
          ret void
      }

  to this:

      X:
          save -96, %o6, %o6
          subcc %i0, 122, %l0
          bg .LBBX_2      ! F
          nop
          ...

  not this:

      X:
          save -96, %o6, %o6
          sethi 0, %l0
          or %g0, 1, %l1
          subcc %i0, 122, %l2
          bg .LBBX_4      !
          nop
      .LBBX_3:            !
          or %g0, %l0, %l1
      .LBBX_4:            !
          subcc %l1, 0, %l0
          bne .LBBX_2     ! F
          nop

  llvm-svn: 25833
- Chris Lattner authored
  llvm-svn: 25832
- Evan Cheng authored
  llvm-svn: 25831
- Chris Lattner authored
  llvm-svn: 25830
- Chris Lattner authored
  llvm-svn: 25829
- Chris Lattner authored
  llvm-svn: 25828
- Chris Lattner authored
      int %test_cpuid(int %op) {
          %B = alloca int
          %C = alloca int
          %D = alloca int
          %A = call int asm "cpuid", "=eax,==ebx,==ecx,==edx,eax"(int* %B, int* %C, int* %D, int %op)
          %Bv = load int* %B
          %Cv = load int* %C
          %Dv = load int* %D
          %x = add int %A, %Bv
          %y = add int %x, %Cv
          %z = add int %y, %Dv
          ret int %z
      }

  to this:

      _test_cpuid:
          sub %ESP, 16
          mov DWORD PTR [%ESP], %EBX
          mov %EAX, DWORD PTR [%ESP + 20]
          cpuid
          mov DWORD PTR [%ESP + 8], %ECX
          mov DWORD PTR [%ESP + 12], %EBX
          mov DWORD PTR [%ESP + 4], %EDX
          mov %ECX, DWORD PTR [%ESP + 12]
          add %EAX, %ECX
          mov %ECX, DWORD PTR [%ESP + 8]
          add %EAX, %ECX
          mov %ECX, DWORD PTR [%ESP + 4]
          add %EAX, %ECX
          mov %EBX, DWORD PTR [%ESP]
          add %ESP, 16
          ret

  ... note the proper register allocation. :) it is unclear to me why the loads aren't folded into the adds.
  llvm-svn: 25827
- Chris Lattner authored
  llvm-svn: 25826
- Chris Lattner authored
  llvm-svn: 25825