Should fold to "a|~b". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b) {return (a&&b) || (a&&!b);}
Should fold to "a". Currently not optimized with "clang -emit-llvm-bc
| opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b, int c) {return (a&&b) || (!a&&c);}
Should fold to "a ? b : c", or at least something sane. Currently not
optimized with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b, int c) {return (a&&b) || (a&&c) || (a&&b&&c);}
Should fold to a && (b || c). Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return x | ((x & 8) ^ 8);}
Should combine to x | 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return x ^ ((x & 8) ^ 8);}
Should also combine to x | 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return (x & 8) == 0 ? -1 : -9;}
Should combine to (x | -9) ^ 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return (x & 8) == 0 ? -9 : -1;}
Should combine to x | -9. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return ((x | -9) ^ 8) & x;}
Should combine to x & -9. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned a) {return a * 0x11111111 >> 28 & 1;}
Should combine to "a * 0x88888888 >> 31". Currently not optimized
with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(char* x) {if ((*x & 32) == 0) return b();}
There's an unnecessary zext in the generated code with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned long long x) {return 40 * (x >> 1);}
Should combine to "20 * (((unsigned)x) & -2)". Currently not
optimized with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
This was noticed in the entryblock for grokdeclarator in 403.gcc:

        %tmp = icmp eq i32 %decl_context, 4
        %decl_context_addr.0 = select i1 %tmp, i32 3, i32 %decl_context
        %tmp1 = icmp eq i32 %decl_context_addr.0, 1
        %decl_context_addr.1 = select i1 %tmp1, i32 0, i32 %decl_context_addr.0

tmp1 should be simplified to something like:
  (!tmp && decl_context == 1)
which, since decl_context == 1 already implies !tmp, is simply
  (decl_context == 1)

This allows recursive simplifications; tmp1 is used all over the place in
the function, e.g. by:

        %tmp23 = icmp eq i32 %decl_context_addr.1, 0    ; <i1> [#uses=1]
        %tmp24 = xor i1 %tmp1, true                     ; <i1> [#uses=1]
        %or.cond8 = and i1 %tmp23, %tmp24               ; <i1> [#uses=1]

later.
//===---------------------------------------------------------------------===//
Store sinking: This code:

void f (int n, int *cond, int *res) {
  int i;
  *res = 0;
  for (i = 0; i < n; i++)
    if (*cond)
      *res ^= 234; /* (*) */
}
On this function GVN hoists the fully redundant value of *res, but nothing
moves the store out. This gives us this code:
bb:             ; preds = %bb2, %entry
  %.rle = phi i32 [ 0, %entry ], [ %.rle6, %bb2 ]
  %i.05 = phi i32 [ 0, %entry ], [ %indvar.next, %bb2 ]
  %1 = load i32* %cond, align 4
  %2 = icmp eq i32 %1, 0
  br i1 %2, label %bb2, label %bb1

bb1:            ; preds = %bb
  %3 = xor i32 %.rle, 234
  store i32 %3, i32* %res, align 4
  br label %bb2

bb2:            ; preds = %bb, %bb1
  %.rle6 = phi i32 [ %3, %bb1 ], [ %.rle, %bb ]
  %indvar.next = add i32 %i.05, 1
  %exitcond = icmp eq i32 %indvar.next, %n
  br i1 %exitcond, label %return, label %bb
DSE should sink partially dead stores to get the store out of the loop.
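For reference, a sketch of the store-sunk form we would like for the function
above (valid assuming *res and *cond don't alias):

void f (int n, int *cond, int *res) {
  int i, tmp = 0;            /* keep the running value in a register */
  for (i = 0; i < n; i++)
    if (*cond)
      tmp ^= 234;
  *res = tmp;                /* the only store, sunk out of the loop */
}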
Here's another partial dead case:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12395
//===---------------------------------------------------------------------===//
Scalar PRE hoists the mul in the common block up to the else:
int test (int a, int b, int c, int g) {
  int d, e;
  if (a)
    d = b * c;
  else
    d = b - c;
  e = b * c + g;
  return d + e;
}
It would be better to do the mul once to reduce codesize above the if.
This is GCC PR38204.
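A sketch of the preferred form (the "bc" temporary is illustrative):

int test (int a, int b, int c, int g) {
  int d, e;
  int bc = b * c;   /* mul computed once, above the if */
  if (a)
    d = bc;
  else
    d = b - c;
  e = bc + g;
  return d + e;
}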
//===---------------------------------------------------------------------===//
GCC PR37810 is an interesting case where we should sink load/store reload
into the if block and outside the loop, so we don't reload/store it on the
non-call path.

for () {
  *P += 1;
  if ()
    call();
  else
    ...
}
->
tmp = *P
for () {
  tmp += 1;
  if () {
    *P = tmp;
    call();
    tmp = *P;
  } else ...
}
*P = tmp;
We now hoist the reload after the call (Transforms/GVN/lpre-call-wrap.ll), but
we don't sink the store. We need partially dead store sinking.
//===---------------------------------------------------------------------===//
[LOAD PRE CRIT EDGE SPLITTING]
GCC PR37166: Sinking of loads prevents SROA'ing the "g" struct on the stack
leading to excess stack traffic. This could be handled by GVN with some crazy
symbolic phi translation. The code we get looks like (g is on the stack):
bb2:            ; preds = %bb1
  ..
  %9 = getelementptr %struct.f* %g, i32 0, i32 0
  store i32 %8, i32* %9, align 4
  br label %bb3

bb3:            ; preds = %bb1, %bb2, %bb
  %c_addr.0 = phi %struct.f* [ %g, %bb2 ], [ %c, %bb ], [ %c, %bb1 ]
  %b_addr.0 = phi %struct.f* [ %b, %bb2 ], [ %g, %bb ], [ %b, %bb1 ]
  %10 = getelementptr %struct.f* %c_addr.0, i32 0, i32 0
  %11 = load i32* %10, align 4

%11 is partially redundant; in BB2 it should have the value %8.
GCC PR33344 and PR35287 are similar cases.
//===---------------------------------------------------------------------===//
There are many load PRE testcases in testsuite/gcc.dg/tree-ssa/loadpre* in the
GCC testsuite; ones we don't get yet (checked through loadpre25) are:

[CRIT EDGE BREAKING]
loadpre3.c predcom-4.c

[PRE OF READONLY CALL]
loadpre5.c

[TURN SELECT INTO BRANCH]
loadpre14.c loadpre15.c
actually a conditional increment: loadpre18.c loadpre19.c
//===---------------------------------------------------------------------===//
[SCALAR PRE]
There are many PRE testcases in testsuite/gcc.dg/tree-ssa/ssa-pre-*.c in the
GCC testsuite.
//===---------------------------------------------------------------------===//
There are some interesting cases in testsuite/gcc.dg/tree-ssa/pred-comm* in the
GCC testsuite. For example, we get the first example in predcom-1.c, but
miss the second one:
unsigned fib[1000];
unsigned avg[1000];

__attribute__ ((noinline))
void count_averages(int n) {
  int i;
  for (i = 1; i < n; i++)
    avg[i] = (((unsigned long) fib[i - 1] + fib[i] + fib[i + 1]) / 3) & 0xffff;
}
which compiles into two loads instead of one in the loop.
predcom-2.c is the same as predcom-1.c.
predcom-3.c is very similar but needs loads feeding each other instead of
store->load.
//===---------------------------------------------------------------------===//
[ALIAS ANALYSIS]
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14705
We should do better analysis of posix_memalign.  At the least, it should mark
its pointer argument nocapture; at best, we should know that the out-value
result doesn't point to anything (like malloc).  One example of this is in
SingleSource/Benchmarks/Misc/dt.c
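For reference, the call pattern in question (a minimal sketch; the wrapper
name is illustrative):

#include <stdlib.h>

void *aligned16(size_t n) {
  void *p = NULL;
  /* posix_memalign stores a fresh allocation through its first argument;
     it doesn't retain (capture) &p, and on success the stored pointer
     aliases nothing pre-existing, much like malloc's return value. */
  if (posix_memalign(&p, 16, n) != 0)
    return NULL;
  return p;
}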
//===---------------------------------------------------------------------===//
A/B get pinned to the stack because we turn an if/then into a select instead
of PRE'ing the load/store. This may be fixable in instcombine:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37892
struct X { int i; };
int foo (int x) {
  struct X a;
  struct X b;
  struct X *p;
  a.i = 1;
  b.i = 2;
  if (x)
    p = &a;
  else
    p = &b;
  return p->i;
}
//===---------------------------------------------------------------------===//
Interesting missed case because of control flow flattening (should be 2 loads):
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=26629
With: llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as |
      opt -mem2reg -gvn -instcombine | llvm-dis
we miss it because we need 1) CRIT EDGE 2) MULTIPLE DIFFERENT
VALS PRODUCED BY ONE BLOCK OVER DIFFERENT PATHS
//===---------------------------------------------------------------------===//
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19633
We could eliminate the branch condition here, loading from null is undefined:
struct S { int w, x, y, z; };
struct T { int r; struct S s; };
void bar (struct S, int);
void foo (int a, struct T b)
{
  struct S *c = 0;
  if (a)
    c = &b.s;
  bar (*c, a);
}
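A sketch of what that fold would allow: since *c is dereferenced
unconditionally at the call, c must be non-null, so a must have been nonzero
and the branch (and the null c) can go away:

void foo (int a, struct T b)
{
  bar (b.s, a);
}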
//===---------------------------------------------------------------------===//
simplifylibcalls should do several optimizations for strspn/strcspn:
strcspn(x, "") -> strlen(x)
strcspn("", x) -> 0
strspn("", x) -> 0
strspn(x, "") -> strlen(x)
strspn(x, "a") -> strchr(x, 'a')-x
strcspn(x, "a") -> inlined loop for up to 3 letters (similarly for strspn):
size_t __strcspn_c3 (__const char *__s, int __reject1, int __reject2,
int __reject3) {
register size_t __result = 0;
while (__s[__result] != '\0' && __s[__result] != __reject1 &&
__s[__result] != __reject2 && __s[__result] != __reject3)
++__result;
return __result;
}
This should turn into a switch on the character. See PR3253 for some notes on
codegen.
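For a constant reject set, the inlined loop could look like this sketch
(hypothetical helper name; the cases are the constant reject characters):

#include <stddef.h>

size_t strcspn_ab(const char *s) {   /* behaves like strcspn(s, "ab") */
  size_t n = 0;
  for (;; ++n) {
    switch (s[n]) {                  /* one multiway branch per character */
    case '\0':
    case 'a':
    case 'b':
      return n;
    }
  }
}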
456.hmmer apparently uses strcspn and strspn a lot. 471.omnetpp uses strspn.
//===---------------------------------------------------------------------===//
"gas" uses this idiom:
else if (strchr ("+-/*%|&^:[]()~", *intel_parser.op_string))
..
else if (strchr ("<>", *intel_parser.op_string)
Those should be turned into a switch.
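A sketch of the switch form (hypothetical helper; note that strchr(set, c)
also matches the terminating nul, so '\0' must be a case too):

int in_op_set(char c) {   /* equivalent of strchr ("<>", c) != 0 */
  switch (c) {
  case '<': case '>': case '\0':
    return 1;
  default:
    return 0;
  }
}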
//===---------------------------------------------------------------------===//
252.eon contains this interesting code:

        %3072 = getelementptr [100 x i8]* %tempString, i32 0, i32 0
        %3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind
        %strlen = call i32 @strlen(i8* %3072)    ; uses = 1
        %endptr = getelementptr [100 x i8]* %tempString, i32 0, i32 %strlen
        call void @llvm.memcpy.i32(i8* %endptr,
          i8* getelementptr ([5 x i8]* @"\01LC42", i32 0, i32 0), i32 5, i32 1)
        %3074 = call i32 @strlen(i8* %endptr) nounwind readonly

This is interesting for a couple reasons.  First, in this:

        %3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind
        %strlen = call i32 @strlen(i8* %3072)

The strlen could be replaced with "%strlen = sub %3073, %3072", because the
strcpy call returns a pointer to the end of the string.  Based on that, the
endptr GEP just becomes equal to 3073, which eliminates a strlen call and GEP.

Second, the memcpy+strlen strlen can be replaced with:

        %3074 = call i32 @strlen([5 x i8]* @"\01LC42") nounwind readonly

Because the destination was just copied into the specified memory buffer.  This,
in turn, can be constant folded to "4".
In other code, it contains:

        %endptr6978 = bitcast i8* %endptr69 to i32*
        store i32 7107374, i32* %endptr6978, align 1
        %3167 = call i32 @strlen(i8* %endptr69) nounwind readonly

Which could also be constant folded.  Whatever is producing this should
probably be fixed to leave this as a memcpy from a string.
Further, eon also has an interesting partially redundant strlen call:

bb8:            ; preds = %_ZN18eonImageCalculatorC1Ev.exit
        %682 = getelementptr i8** %argv, i32 6          ; <i8**> [#uses=2]
        %683 = load i8** %682, align 4                  ; <i8*> [#uses=4]
        %684 = load i8* %683, align 1                   ; <i8> [#uses=1]
        %685 = icmp eq i8 %684, 0                       ; <i1> [#uses=1]
        br i1 %685, label %bb10, label %bb9

bb9:            ; preds = %bb8
        %686 = call i32 @strlen(i8* %683) nounwind readonly
        %687 = icmp ugt i32 %686, 254                   ; <i1> [#uses=1]
        br i1 %687, label %bb10, label %bb11

bb10:           ; preds = %bb9, %bb8
        %688 = call i32 @strlen(i8* %683) nounwind readonly

This could be eliminated by doing the strlen once in bb8, saving code size and
improving perf on the bb8->9->10 path.
//===---------------------------------------------------------------------===//
I see an interesting fully redundant call to strlen left in 186.crafty:InputMove
which looks like:

        %movetext11 = getelementptr [128 x i8]* %movetext, i32 0, i32 0

bb62:           ; preds = %bb55, %bb53
        %promote.0 = phi i32 [ %169, %bb55 ], [ 0, %bb53 ]
        %171 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1
        %172 = add i32 %171, -1                 ; <i32> [#uses=1]
        %173 = getelementptr [128 x i8]* %movetext, i32 0, i32 %172
        ... no stores ...
        br i1 %or.cond, label %bb65, label %bb72

bb65:           ; preds = %bb62
        store i8 0, i8* %173, align 1
        br label %bb72

bb72:           ; preds = %bb65, %bb62
        %trank.1 = phi i32 [ %176, %bb65 ], [ -1, %bb62 ]
        %177 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1
Note that on the bb62->bb72 path, the %177 strlen call is partially redundant
with the %171 call.  At worst, we could shove the %177 strlen call up into the
bb65 block, moving it off the bb62->bb72 path.  However, note that bb65 stores
to the string, zeroing out the last byte.  This means that on that path the
value of %177 is actually just %171-1.  A sub is cheaper than a strlen!
This pattern repeats several times, basically doing:

  A = strlen(P);
  P[A-1] = 0;
  B = strlen(P);

where it is "obvious" that B = A-1.
//===---------------------------------------------------------------------===//
186.crafty also contains this code:

        %1906 = call i32 @strlen(i8* getelementptr ([32 x i8]* @pgn_event, i32 0,i32 0))
        %1907 = getelementptr [32 x i8]* @pgn_event, i32 0, i32 %1906
        %1908 = call i8* @strcpy(i8* %1907, i8* %1905) nounwind align 1
        %1909 = call i32 @strlen(i8* getelementptr ([32 x i8]* @pgn_event, i32 0,i32 0))
        %1910 = getelementptr [32 x i8]* @pgn_event, i32 0, i32 %1909

The last strlen is computable as 1908-@pgn_event, which means 1910=1908.
//===---------------------------------------------------------------------===//
186.crafty has this interesting pattern with the "out.4543" variable:

call void @llvm.memcpy.i32(
        i8* getelementptr ([10 x i8]* @out.4543, i32 0, i32 0),
        i8* getelementptr ([7 x i8]* @"\01LC28700", i32 0, i32 0), i32 7, i32 1)
%101 = call @printf(i8* ... @out.4543, i32 0, i32 0)) nounwind

It is basically doing:

  memcpy(globalarray, "string");
  printf(..., globalarray);
Anyway, by knowing that printf just reads the memory and forward substituting
the string directly into the printf, this eliminates reads from globalarray.
Since this pattern occurs frequently in crafty (due to the "DisplayTime" and
other similar functions) there are many stores to "out". Once all the printfs
stop using "out", all that is left is the memcpy's into it. This should allow
globalopt to remove the "stored only" global.
//===---------------------------------------------------------------------===//
This code:

define inreg i32 @foo(i8* inreg %p) nounwind {
  %tmp0 = load i8* %p
  %tmp1 = ashr i8 %tmp0, 5
  %tmp2 = sext i8 %tmp1 to i32
  ret i32 %tmp2
}

could be dagcombine'd to a sign-extending load with a shift.
For example, on x86 this currently gets this:

	movb	(%eax), %al
	sarb	$5, %al
	movsbl	%al, %eax

while it could get this:

	movsbl	(%eax), %eax
	sarl	$5, %eax
//===---------------------------------------------------------------------===//
GCC PR31029:

int test(int x) { return 1-x == x; }     // --> return false
int test2(int x) { return 2-x == x; }    // --> return x == 1 ?

Always foldable for odd constants; what is the rule for even?  (In wrapping
i32 arithmetic, c-x == x is 2*x == c, which has no solution when c is odd,
hence the fold to false; when c is even it has two solutions, c/2 and
c/2 + 2^31, so the even case folds to a compare of x*2 against c rather than
to x == c/2.)
//===---------------------------------------------------------------------===//
PR 3381: GEP to field of size 0 inside a struct could be turned into GEP
for the next field in struct (which is at the same address).

For example: a store of float into { {{}}, float } could be turned into a
store to the float directly.
//===---------------------------------------------------------------------===//
#include <math.h>
double foo(double a) { return sin(a); }

This compiles into this on x86-64 Linux:

foo:
	subq	$8, %rsp
	call	sin
	addq	$8, %rsp
	ret

vs:

foo:
	jmp	sin
//===---------------------------------------------------------------------===//
The arg promotion pass should make use of nocapture to make its alias analysis
stuff much more precise.
//===---------------------------------------------------------------------===//
The following functions should be optimized to use a select instead of a
branch (from gcc PR40072):
char char_int(int m) {if(m>7) return 0; return m;}
int int_char(char m) {if(m>7) return 0; return m;}
//===---------------------------------------------------------------------===//
int func(int a, int b) { if (a & 0x80) b |= 0x80; else b &= ~0x80; return b; }

Generates this:

define i32 @func(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %0 = and i32 %a, 128                            ; <i32> [#uses=1]
  %1 = icmp eq i32 %0, 0                          ; <i1> [#uses=1]
  %2 = or i32 %b, 128                             ; <i32> [#uses=1]
  %3 = and i32 %b, -129                           ; <i32> [#uses=1]
  %b_addr.0 = select i1 %1, i32 %3, i32 %2        ; <i32> [#uses=1]
  ret i32 %b_addr.0
}

However, it's functionally equivalent to:

  b = (b & ~0x80) | (a & 0x80);

Which generates this:

define i32 @func(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %0 = and i32 %b, -129                           ; <i32> [#uses=1]
  %1 = and i32 %a, 128                            ; <i32> [#uses=1]
  %2 = or i32 %0, %1                              ; <i32> [#uses=1]
  ret i32 %2
}

This can be generalized for other forms:

  b = (b & ~0x80) | (a & 0x40) << 1;
//===---------------------------------------------------------------------===//
These two functions produce different code.  They shouldn't:

#include <stdint.h>

uint8_t p1(uint8_t b, uint8_t a) {
  b = (b & ~0xc0) | (a & 0xc0);
  return (b);
}

uint8_t p2(uint8_t b, uint8_t a) {
  b = (b & ~0x40) | (a & 0x40);
  b = (b & ~0x80) | (a & 0x80);
  return (b);
}

define zeroext i8 @p1(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp {
entry:
  %0 = and i8 %b, 63                              ; <i8> [#uses=1]
  %1 = and i8 %a, -64                             ; <i8> [#uses=1]
  %2 = or i8 %1, %0                               ; <i8> [#uses=1]
  ret i8 %2
}

define zeroext i8 @p2(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp {
entry:
  %0 = and i8 %b, 63                              ; <i8> [#uses=1]
  %.masked = and i8 %a, 64                        ; <i8> [#uses=1]
  %1 = and i8 %a, -128                            ; <i8> [#uses=1]
  %2 = or i8 %1, %0                               ; <i8> [#uses=1]
  %3 = or i8 %2, %.masked                         ; <i8> [#uses=1]
  ret i8 %3
}
//===---------------------------------------------------------------------===//
IPSCCP does not currently propagate argument dependent constants through
functions where it does not see all of the callers.  This includes functions
with normal external linkage as well as templates, C99 inline functions, etc.
Specifically, it does nothing to:

define i32 @test(i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %0 = add nsw i32 %y, %z
  %1 = mul i32 %0, %x
  %2 = mul i32 %y, %z
  %3 = add nsw i32 %1, %2
  ret i32 %3
}

define i32 @test2() nounwind {
entry:
  %0 = call i32 @test(i32 1, i32 2, i32 4) nounwind
  ret i32 %0
}

It would be interesting to extend IPSCCP to be able to handle simple cases like
this, where all of the arguments to a call are constant.  Because IPSCCP runs
before inlining, trivial templates and inline functions are not yet inlined.
The results for a function + set of constant arguments should be memoized in a
map.
//===---------------------------------------------------------------------===//
The libcall constant folding stuff should be moved out of SimplifyLibcalls into
libanalysis' constantfolding logic. This would allow IPSCCP to be able to
handle simple things like this:
static int foo(const char *X) { return strlen(X); }
int bar() { return foo("abcd"); }
//===---------------------------------------------------------------------===//
InstCombine should use SimplifyDemandedBits to remove the or instruction:

define i1 @test(i8 %x, i8 %y) {
  %A = or i8 %x, 1
  %B = icmp ugt i8 %A, 3
  ret i1 %B
}
Currently instcombine calls SimplifyDemandedBits with either all bits or just
the sign bit, if the comparison is obviously a sign test. In this case, we only
need all but the bottom two bits from %A, and if we gave that mask to SDB it
would delete the or instruction for us.
//===---------------------------------------------------------------------===//
functionattrs doesn't know much about memcpy/memset.  This function should be
marked readnone rather than readonly, since it only twiddles local memory, but
functionattrs doesn't handle memset/memcpy/memmove aggressively:

struct X { int *p; int *q; };
int foo() {
  int i = 0, j = 1;
  struct X x, y;
  int **p;
  y.p = &i;
  x.q = &j;
  p = __builtin_memcpy (&x, &y, sizeof (int *));
  return **p;
}
//===---------------------------------------------------------------------===//
Missed instcombine transformation:

define i1 @a(i32 %x) nounwind readnone {
entry:
  %cmp = icmp eq i32 %x, 30
  %sub = add i32 %x, -30
  %cmp2 = icmp ugt i32 %sub, 9
  %or = or i1 %cmp, %cmp2
  ret i1 %or
}
This should be optimized to a single compare. Testcase derived from gcc.
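One way to express the single compare (a sketch): x == 30 is the (x-30) == 0
case, and merging it with (x-30) >u 9 excludes exactly [1,9]:

int a(int x) { return (unsigned)x - 31u > 8u; }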
//===---------------------------------------------------------------------===//
Missed instcombine transformation:
void b();
void a(int x) { if (((1<<x)&8)==0) b(); }
The shift should be optimized out. Testcase derived from gcc.
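What this should become (a sketch): (1 << x) & 8 is nonzero only when x == 3,
and an out-of-range shift amount is undefined anyway, so:

void b();
void a(int x) { if (x != 3) b(); }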
//===---------------------------------------------------------------------===//
Missed instcombine or reassociate transformation:
int a(int a, int b) { return (a==12)&(b>47)&(b<58); }
The sgt and slt should be combined into a single comparison. Testcase derived
from gcc.
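A sketch of the combined form, using the usual unsigned range-check trick
(47 < b && b < 58 is b in [48,58)):

int a(int a, int b) { return (a == 12) & ((unsigned)b - 48u < 10u); }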
//===---------------------------------------------------------------------===//
Missed instcombine transformation:

define i32 @a(i32 %x) nounwind readnone {
entry:
  %rem = srem i32 %x, 32
  %shl = shl i32 1, %rem
  ret i32 %shl
}
The srem can be transformed to an and because if x is negative, the shift is
undefined. Testcase derived from gcc.
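In C terms, the allowed rewrite looks like this sketch (x % 32 becomes x & 31
once negative x is excluded):

int a(int x) { return 1 << (x & 31); }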
//===---------------------------------------------------------------------===//
Missed instcombine/dagcombine transformation:

define i32 @a(i32 %x, i32 %y) nounwind readnone {
entry:
  %mul = mul i32 %y, -8
  %sub = sub i32 %x, %mul
  ret i32 %sub
}
Should compile to something like x+y*8, but currently compiles to an
inefficient result. Testcase derived from gcc.
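The desired form, as a sketch (x - (y * -8) is just x + y*8):

int a(int x, int y) { return x + y * 8; }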
//===---------------------------------------------------------------------===//
Missed instcombine/dagcombine transformation:

define void @lshift_lt(i8 zeroext %a) nounwind {
entry:
  %conv = zext i8 %a to i32
  %shl = shl i32 %conv, 3
  %cmp = icmp ult i32 %shl, 33
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  ret void

if.end:
  ret void
}

declare void @bar() nounwind
The shift should be eliminated. Testcase derived from gcc.
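A sketch of the shift-free form: with a in [0,255], (a << 3) < 33 is
a*8 <= 32, i.e. a < 5.

void bar(void);
void lshift_lt(unsigned char a) { if (a < 5) bar(); }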
//===---------------------------------------------------------------------===//
These compile into different code: one gets recognized as a switch and the
other doesn't, due to phase ordering issues (PR6212):

int test1(int mainType, int subType) {
  if (mainType == 7)
    subType = 4;
  else if (mainType == 9)
    subType = 6;
  else if (mainType == 11)
    subType = 9;
  return subType;
}

int test2(int mainType, int subType) {
  if (mainType == 7)
    subType = 4;
  if (mainType == 9)
    subType = 6;
  if (mainType == 11)
    subType = 9;
  return subType;
}
//===---------------------------------------------------------------------===//
The following test case (from PR6576):

define i32 @mul(i32 %a, i32 %b) nounwind readnone {
entry:
  %cond1 = icmp eq i32 %b, 0                      ; <i1> [#uses=1]
  br i1 %cond1, label %exit, label %bb.nph

bb.nph:                                           ; preds = %entry
  %tmp = mul i32 %b, %a                           ; <i32> [#uses=1]
  ret i32 %tmp

exit:                                             ; preds = %entry
  ret i32 0
}

could be reduced to:

define i32 @mul(i32 %a, i32 %b) nounwind readnone {
entry:
  %tmp = mul i32 %b, %a
  ret i32 %tmp
}
//===---------------------------------------------------------------------===//
We should use DSE + llvm.lifetime.end to delete dead vtable pointer updates.
See GCC PR34949.

Another interesting case is that something related could be used for variables
that go const after their ctor has finished.  In these cases, globalopt (which
can statically run the constructor) could mark the global const (so it gets put
in the readonly section).  A testcase would be:
#include <complex>
using namespace std;
const complex<char> should_be_in_rodata (42,-42);
complex<char> should_be_in_data (42,-42);
complex<char> should_be_in_bss;
Where we currently evaluate the ctors but the globals don't become const because
the optimizer doesn't know they "become const" after the ctor is done. See
GCC PR4131 for more examples.
//===---------------------------------------------------------------------===//
In this code:

  long foo(long x) {
    return x > 1 ? x : 1;
  }

LLVM emits a comparison with 1 instead of 0.  0 would be equivalent
and cheaper on most targets.

LLVM prefers comparisons with zero over non-zero in general, but in this
case it chooses instead to keep the max operation obvious.
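For reference, the zero-compare form is equivalent (at x == 1 both branches
produce 1, so shifting the boundary is harmless):

  long foo(long x) {
    return x > 0 ? x : 1;
  }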
//===---------------------------------------------------------------------===//
Take the following testcase on x86-64 (similar testcases exist for all targets
with addc/adde):

define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b,
               i64 %c) nounwind {
entry:
  %0 = zext i64 %a to i128                        ; <i128> [#uses=1]
  %1 = zext i64 %b to i128                        ; <i128> [#uses=1]
  %2 = add i128 %1, %0                            ; <i128> [#uses=2]
  %3 = zext i64 %c to i128                        ; <i128> [#uses=1]
  %4 = shl i128 %3, 64                            ; <i128> [#uses=1]
  %5 = add i128 %4, %2                            ; <i128> [#uses=1]
  %6 = lshr i128 %5, 64                           ; <i128> [#uses=1]
  %7 = trunc i128 %6 to i64                       ; <i64> [#uses=1]
  store i64 %7, i64* %s, align 8
  %8 = trunc i128 %2 to i64                       ; <i64> [#uses=1]
  store i64 %8, i64* %t, align 8
  ret void
}

Generated code:
	addq	%rcx, %rdx
	movl	$0, %eax
	adcq	$0, %rax
	addq	%r8, %rax
	movq	%rax, (%rdi)
	movq	%rdx, (%rsi)
	ret

Expected code:
	addq	%rcx, %rdx
	adcq	$0, %r8
	movq	%r8, (%rdi)
	movq	%rdx, (%rsi)
	ret

The generated SelectionDAG has an ADD of an ADDE, where both operands of the
ADDE are zero.  Replacing one of the operands of the ADDE with the other operand
of the ADD, and replacing the ADD with the ADDE, should give the desired result.

(That said, we are doing a lot better than gcc on this testcase. :) )
//===---------------------------------------------------------------------===//
Switch lowering generates less than ideal code for the following switch:

define void @a(i32 %x) nounwind {
entry:
  switch i32 %x, label %if.end [
    i32 0, label %if.then
    i32 1, label %if.then
    i32 2, label %if.then
    i32 3, label %if.then
    i32 5, label %if.then
  ]

if.then:
  tail call void @foo() nounwind
  ret void

if.end:
  ret void
}

declare void @foo()

Generated code on x86-64 (other platforms give similar results):

a:
	cmpl	$5, %edi
	ja	.LBB0_2
	movl	%edi, %eax
	movl	$47, %ecx
	btq	%rax, %rcx
	jb	.LBB0_3
.LBB0_2:
	ret
.LBB0_3:
	jmp	foo  # TAILCALL
The movl+movl+btq+jb could be simplified to a cmpl+jne.
Or, if we wanted to be really clever, we could simplify the whole thing to
something like the following, which eliminates a branch (after the xor, the
interesting cases map exactly onto [0,4]):

	xorl	$1, %edi
	cmpl	$4, %edi
	jbe	.LBB0_2
	ret
.LBB0_2:
	jmp	foo  # TAILCALL
//===---------------------------------------------------------------------===//