Apr 03, 2006
Evan Cheng authored
Some bug fixes and naming inconsistency fixes.

llvm-svn: 27377
Chris Lattner authored
llvm-svn: 27376
Chris Lattner authored
llvm-svn: 27375
Chris Lattner authored
llvm-svn: 27374
Chris Lattner authored
llvm-svn: 27372
Andrew Lenharth authored
llvm-svn: 27370
Andrew Lenharth authored
llvm-svn: 27368
Andrew Lenharth authored
llvm-svn: 27367
Apr 02, 2006
Andrew Lenharth authored
llvm-svn: 27364
Andrew Lenharth authored
llvm-svn: 27363
Andrew Lenharth authored
llvm-svn: 27362
Chris Lattner authored
llvm-svn: 27360
Chris Lattner authored
llvm-svn: 27359
Chris Lattner authored
llvm-svn: 27358
Chris Lattner authored
llvm-svn: 27357
Chris Lattner authored
int %AreSecondAndThirdElementsBothNegative(<4 x float>* %in) {
entry:
        %tmp1 = load <4 x float>* %in           ; <<4 x float>> [#uses=1]
        %tmp = tail call int %llvm.ppc.altivec.vcmpgefp.p( int 1, <4 x float> < float 0x7FF8000000000000, float 0.000000e+00, float 0.000000e+00, float 0x7FF8000000000000 >, <4 x float> %tmp1 )              ; <int> [#uses=1]
        %tmp = seteq int %tmp, 0                ; <bool> [#uses=1]
        %tmp3 = cast bool %tmp to int           ; <int> [#uses=1]
        ret int %tmp3
}

into this:

_AreSecondAndThirdElementsBothNegative:
        mfspr r2, 256
        oris r4, r2, 49152
        mtspr 256, r4
        li r4, lo16(LCPI1_0)
        lis r5, ha16(LCPI1_0)
        lvx v0, 0, r3
        lvx v1, r5, r4
        vcmpgefp. v0, v1, v0
        mfcr r3, 2
        rlwinm r3, r3, 27, 31, 31
        mtspr 256, r2
        blr

instead of this:

_AreSecondAndThirdElementsBothNegative:
        mfspr r2, 256
        oris r4, r2, 49152
        mtspr 256, r4
        li r4, lo16(LCPI1_0)
        lis r5, ha16(LCPI1_0)
        lvx v0, 0, r3
        lvx v1, r5, r4
        vcmpgefp. v0, v1, v0
        mfcr r3, 2
        rlwinm r3, r3, 27, 31, 31
        xori r3, r3, 1
        cntlzw r3, r3
        srwi r3, r3, 5
        mtspr 256, r2
        blr

llvm-svn: 27356
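A side note on the three instructions that vanish here: xori/cntlzw/srwi is the usual PowerPC branch-free zero test, relying on cntlzw returning 32 only for a zero input (and 32 >> 5 == 1). A small portable C sketch of that idiom, with cntlzw modeled by hand since this is not PPC-only code:

#include <assert.h>
#include <stdint.h>

/* Portable model of the PPC cntlzw instruction (count leading zeros,
   defined to return 32 for a zero input). */
static uint32_t cntlzw(uint32_t x) {
    uint32_t n = 0;
    while (n < 32 && !(x & (0x80000000u >> n)))
        ++n;
    return n;
}

int main(void) {
    /* The removed tail "xori r3,r3,1; cntlzw r3,r3; srwi r3,r3,5"
       computes ((bit ^ 1) == 0) without a branch; after this change the
       CR bit extracted by mfcr/rlwinm already carries the wanted value. */
    for (uint32_t bit = 0; bit <= 1; ++bit) {
        uint32_t old_way = cntlzw(bit ^ 1) >> 5;   /* xori; cntlzw; srwi 5 */
        assert(old_way == (uint32_t)(bit == 1));
    }
    return 0;
}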
Chris Lattner authored
%tmp = cast <4 x uint> %tmp to <4 x int>                ; <<4 x int>> [#uses=1]
%tmp = cast <4 x int> %tmp to <4 x float>               ; <<4 x float>> [#uses=1]

into:

%tmp = cast <4 x uint> %tmp to <4 x float>              ; <<4 x float>> [#uses=1]

llvm-svn: 27355
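Assuming, as the fold requires, that both casts merely reinterpret bits, their composition is itself a single bit reinterpretation, so the intermediate <4 x int> step can be dropped. A scalar C sketch of the same idea (the values are illustrative, and memcpy stands in for the bit-preserving cast):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* C model of a bit-reinterpreting 32-bit cast. */
static float reinterpret_u32_as_float(uint32_t u) {
    float f;
    memcpy(&f, &u, sizeof f);
    return f;
}

int main(void) {
    uint32_t u = 0x3F800000u;                      /* bit pattern of 1.0f */
    int32_t  i = (int32_t)u;                       /* uint -> int: bits unchanged */
    float via_int = reinterpret_u32_as_float((uint32_t)i);
    float direct  = reinterpret_u32_as_float(u);   /* the folded single cast */
    printf("%f %f\n", via_int, direct);            /* prints 1.000000 twice */
    return 0;
}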
Chris Lattner authored
llvm-svn: 27354
Chris Lattner authored
%tmp = cast <4 x uint>* %testData to <4 x int>*         ; <<4 x int>*> [#uses=1]
%tmp = load <4 x int>* %tmp             ; <<4 x int>> [#uses=1]

to this:

%tmp = load <4 x uint>* %testData               ; <<4 x uint>> [#uses=1]
%tmp = cast <4 x uint> %tmp to <4 x int>                ; <<4 x int>> [#uses=1]

llvm-svn: 27353
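The transformation swaps a cast of the pointer for a cast of the loaded value; either order produces the same bits. A scalar C analogy (array name and value are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    uint32_t testData[1] = { 0x12345678u };

    /* Before: cast the pointer, then load through it. */
    int32_t a;
    memcpy(&a, testData, sizeof a);    /* models *(int32_t*)testData */

    /* After: load the natural type, then cast the loaded value. */
    int32_t b = (int32_t)testData[0];

    printf("%d %d\n", a, b);           /* same value either way */
    return 0;
}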
Chris Lattner authored
elimination of one load from this:

int AreSecondAndThirdElementsBothNegative( vector float *in ) {
#define QNaN 0x7FC00000
        const vector unsigned int testData = (vector unsigned int)( QNaN, 0, 0, QNaN );
        vector float test = vec_ld( 0, (float*) &testData );
        return ! vec_any_ge( test, *in );
}

Now generating:

_AreSecondAndThirdElementsBothNegative:
        mfspr r2, 256
        oris r4, r2, 49152
        mtspr 256, r4
        li r4, lo16(LCPI1_0)
        lis r5, ha16(LCPI1_0)
        addi r6, r1, -16
        lvx v0, r5, r4
        stvx v0, 0, r6
        lvx v1, 0, r3
        vcmpgefp. v0, v0, v1
        mfcr r3, 2
        rlwinm r3, r3, 27, 31, 31
        xori r3, r3, 1
        cntlzw r3, r3
        srwi r3, r3, 5
        mtspr 256, r2
        blr

llvm-svn: 27352
Chris Lattner authored
llvm-svn: 27351
Chris Lattner authored
llvm-svn: 27349
Chris Lattner authored
llvm-svn: 27348
Chris Lattner authored
into elements and operate on each piece. This allows generic vector integer multiplies to work on PPC, though the generated code is horrible.

llvm-svn: 27347
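A C sketch of what breaking a vector operation into elements means for a 4-wide integer multiply (illustrative only, not the patch itself):

#include <stdio.h>

/* Hand-scalarized 4-wide multiply: extract each element, multiply,
   and insert the result, one lane at a time. */
static void vmul4(const int a[4], const int b[4], int out[4]) {
    for (int i = 0; i < 4; ++i)
        out[i] = a[i] * b[i];
}

int main(void) {
    int a[4] = { 1, 2, 3, 4 }, b[4] = { 5, 6, 7, 8 }, r[4];
    vmul4(a, b, r);
    printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);   /* 5 12 21 32 */
    return 0;
}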
Chris Lattner authored
have to serialize against each other. This allows us to schedule lvx's across each other, for example.

llvm-svn: 27346
Chris Lattner authored
llvm-svn: 27344
Chris Lattner authored
"vector unsigned char mergeLowHigh = (vector unsigned char) ( 8, 9, 10, 11, 16, 17, 18, 19, 12, 13, 14, 15, 20, 21, 22, 23 ); vector unsigned char mergeHighLow = vec_xor( mergeLowHigh, vec_splat_u8(8));" aka: void %test2(<16 x sbyte>* %P) { store <16 x sbyte> cast (<4 x int> xor (<4 x int> cast (<16 x ubyte> < ubyte 8, ubyte 9, ubyte 10, ubyte 11, ubyte 16, ubyte 17, ubyte 18, ubyte 19, ubyte 12, ubyte 13, ubyte 14, ubyte 15, ubyte 20, ubyte 21, ubyte 22, ubyte 23 > to <4 x int>), <4 x int> cast (<16 x sbyte> < sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8, sbyte 8 > to <4 x int>)) to <16 x sbyte>), <16 x sbyte> * %P ret void } into this: _test2: mfspr r2, 256 oris r4, r2, 32768 mtspr 256, r4 li r4, lo16(LCPI2_0) lis r5, ha16(LCPI2_0) lvx v0, r5, r4 stvx v0, 0, r3 mtspr 256, r2 blr instead of this: _test2: mfspr r2, 256 oris r4, r2, 49152 mtspr 256, r4 li r4, lo16(LCPI2_0) lis r5, ha16(LCPI2_0) vspltisb v0, 8 lvx v1, r5, r4 vxor v0, v1, v0 stvx v0, 0, r3 mtspr 256, r2 blr ... which occurs here: http://developer.apple.com/hardware/ve/calcspeed.html llvm-svn: 27343
Chris Lattner authored
llvm-svn: 27342
Chris Lattner authored
llvm-svn: 27341
Chris Lattner authored
llvm-svn: 27340
Chris Lattner authored
llvm-svn: 27339
Chris Lattner authored
llvm-svn: 27337
Chris Lattner authored
llvm-svn: 27336
Chris Lattner authored
"vspltisb v0, 8" instead of a constant pool load. llvm-svn: 27335
Chris Lattner authored
multiple register classes. This fixes PowerPC/2006-04-01-FloatDoubleExtend.ll

llvm-svn: 27334
Chris Lattner authored
llvm-svn: 27332
Chris Lattner authored
llvm-svn: 27331
Chris Lattner authored
llvm-svn: 27330
Apr 01, 2006
Chris Lattner authored
Fold (B&A)^A == ~B & A This implements InstCombine/xor.ll:test2[56] llvm-svn: 27328
Chris Lattner authored
PR726 by performing consistent signed division, not consistent unsigned division when evaluating scev's. Do not touch udivs.

llvm-svn: 27326
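Why the distinction matters: with a negative stride, the trip-count computation divides a negative distance by a negative step, and only signed division gives a sensible answer. A minimal C illustration (numbers chosen for the example, not taken from PR726):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Trip count of a loop stepping i = 0, -4, -8, ... down to -40:
       distance and stride are both negative. */
    int32_t distance = -40, stride = -4;
    printf("signed:   %d\n", distance / stride);                       /* 10 */
    printf("unsigned: %u\n", (uint32_t)distance / (uint32_t)stride);   /* 0  */
    return 0;
}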