The expression should optimize to something like
"!((start|end)&~PMD_MASK)". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
From GCC Bug 15241:
unsigned int
foo (unsigned int a, unsigned int b)
{
if (a <= 7 && b <= 7)
baz ();
}
Should combine to "(a|b) <= 7". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
From GCC Bug 3756:
int
pn (int n)
{
return (n >= 0 ? 1 : -1);
}
Should combine to (n >> 31) | 1. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts | llc".
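A quick harness (ours, not from the bug) that spot-checks the identity; it
assumes 32-bit int and an arithmetic right shift of negative values, which is
what LLVM's ashr provides:

  #include <assert.h>
  #include <limits.h>

  /* n >> 31 is 0 for n >= 0 and all-ones (-1) for n < 0, so OR'ing in the
     low bit yields exactly 1 or -1. */
  static int pn_branchless(int n) { return (n >> 31) | 1; }

  int main(void) {
    int probes[] = { INT_MIN, -2, -1, 0, 1, 2, INT_MAX };
    for (unsigned i = 0; i < sizeof probes / sizeof *probes; i++)
      assert(pn_branchless(probes[i]) == (probes[i] >= 0 ? 1 : -1));
    return 0;
  }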
//===---------------------------------------------------------------------===//
From GCC Bug 28685:
int test(int a, int b)
{
int lt = a < b;
int eq = a == b;
return (lt || eq);
}
Should combine to "a <= b". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts | llc".
//===---------------------------------------------------------------------===//
void a(int variable)
{
if (variable == 4 || variable == 6)
bar();
}
This should optimize to "if ((variable | 2) == 6)". Currently not
optimized with "clang -emit-llvm-bc | opt -std-compile-opts | llc".
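Why this is sound (our note): (variable | 2) == 6 forces variable & ~2 == 4
while leaving bit 1 free, so exactly {4, 6} match. A minimal exhaustive check:

  #include <assert.h>

  int main(void) {
    for (int v = -1024; v <= 1024; v++)
      assert(((v | 2) == 6) == (v == 4 || v == 6));
    return 0;
  }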
//===---------------------------------------------------------------------===//
unsigned int f(unsigned int i, unsigned int n) {++i; if (i == n) ++i; return i;}
unsigned int f2(unsigned int i, unsigned int n) {++i; i += i == n; return i;}
These should combine to the same thing. Currently, the first function
produces better code on X86.
//===---------------------------------------------------------------------===//
From GCC Bug 15784:
#define abs(x) x>0?x:-x
int f(int x, int y)
{
return (abs(x)) >= 0;
}
This should optimize to x != INT_MIN: with -fwrapv, -x wraps back to INT_MIN
(still negative) only when x is INT_MIN, so the comparison is false for
exactly that one input. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
From GCC Bug 14753:
void
rotate_cst (unsigned int a)
{
a = (a << 10) | (a >> 22);
if (a == 123)
bar ();
}
void
minus_cst (unsigned int a)
{
unsigned int tem;
tem = 20 - a;
if (tem == 5)
bar ();
}
void
mask_gt (unsigned int a)
{
/* This is equivalent to a > 15. */
if ((a & ~7) > 8)
bar ();
}
void
rshift_gt (unsigned int a)
{
/* This is equivalent to a > 23. */
if ((a >> 2) > 5)
bar ();
}
All should simplify to a single comparison. All of these are
currently not optimized with "clang -emit-llvm-bc | opt
-std-compile-opts".
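For reference (our reading, not stated in the bug), the single comparisons
are: a == (123u << 22) for rotate_cst (rotate the constant right by 10;
123 >> 10 is 0), a == 15 for minus_cst, and the a > 15 / a > 23 forms already
given in the comments. A spot-check harness:

  #include <assert.h>

  static void check(unsigned a) {
    assert((((a << 10) | (a >> 22)) == 123) == (a == (123u << 22)));
    assert(((20 - a) == 5) == (a == 15));
    assert(((a & ~7u) > 8) == (a > 15));
    assert(((a >> 2) > 5) == (a > 23));
  }

  int main(void) {
    unsigned probes[] = { 0, 8, 9, 15, 16, 23, 24, 123,
                          123u << 22, (123u << 22) + 1, ~0u };
    for (unsigned i = 0; i < sizeof probes / sizeof *probes; i++)
      check(probes[i]);
    for (unsigned long long i = 0; i < (1ULL << 32); i += 65537)
      check((unsigned)i);               /* strided sample of the full range */
    return 0;
  }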
//===---------------------------------------------------------------------===//
From GCC Bug 32605:
int c(int* x) {return (char*)x+2 == (char*)x;}
Should combine to 0. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts" (although llc can optimize it).
//===---------------------------------------------------------------------===//
int a(unsigned char* b) {return *b > 99;}
There's an unnecessary zext in the generated code with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(unsigned b) {return ((b << 31) | (b << 30)) >> 31;}
Should be combined to "((b >> 1) | b) & 1". Currently not optimized
with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned x, unsigned y) { return x | (y & 1) | (y & 2);}
Should combine to "x | (y & 3)". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned a) {return ((a | 1) & 3) | (a & -4);}
Should combine to "a | 1". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b, int c) {return (~a & c) | ((c|a) & b);}
Should fold to "(~a & c) | (a & b)". Currently not optimized with
"clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a,int b) {return (~(a|b))|a;}
Should fold to "a|~b". Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b) {return (a&&b) || (a&&!b);}
Should fold to "a". Currently not optimized with "clang -emit-llvm-bc
| opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b, int c) {return (a&&b) || (!a&&c);}
Should fold to "a ? b : c", or at least something sane. Currently not
optimized with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int a, int b, int c) {return (a&&b) || (a&&c) || (a&&b&&c);}
Should fold to a && (b || c). Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return x | ((x & 8) ^ 8);}
Should combine to x | 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return x ^ ((x & 8) ^ 8);}
Should also combine to x | 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return (x & 8) == 0 ? -1 : -9;}
Should combine to (x | -9) ^ 8. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return (x & 8) == 0 ? -9 : -1;}
Should combine to x | -9. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
int a(int x) {return ((x | -9) ^ 8) & x;}
Should combine to x & -9. Currently not optimized with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned a) {return a * 0x11111111 >> 28 & 1;}
Should combine to "a * 0x88888888 >> 31". Currently not optimized
with "clang -emit-llvm-bc | opt -std-compile-opts".
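A brute-force check of the last several identities (ours); the signed cases
assume ordinary two's-complement int:

  #include <assert.h>

  int main(void) {
    for (long long v = -70000; v <= 70000; v++) {
      int x = (int)v;
      unsigned a = (unsigned)v;
      assert((x | ((x & 8) ^ 8)) == (x | 8));
      assert((x ^ ((x & 8) ^ 8)) == (x | 8));
      assert(((x & 8) == 0 ? -1 : -9) == ((x | -9) ^ 8));
      assert(((x & 8) == 0 ? -9 : -1) == (x | -9));
      assert((((x | -9) ^ 8) & x) == (x & -9));
      /* 0x88888888 is 0x11111111 << 3, so bit 28 of the first product is
         bit 31 of the second. */
      assert((a * 0x11111111u >> 28 & 1) == (a * 0x88888888u >> 31));
    }
    return 0;
  }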
//===---------------------------------------------------------------------===//
unsigned a(char* x) {if ((*x & 32) == 0) return b();}
There's an unnecessary zext in the generated code with "clang
-emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
unsigned a(unsigned long long x) {return 40 * (x >> 1);}
Should combine to "20 * (((unsigned)x) & -2)". Currently not
optimized with "clang -emit-llvm-bc | opt -std-compile-opts".
//===---------------------------------------------------------------------===//
We would like to do the following transform in the instcombiner:
-X/C -> X/-C
However, this isn't valid if (-X) overflows. We can implement this when we
have the concept of a "C signed subtraction" operator that is undefined on
overflow.
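Concretely (our illustration, emulating wrapping negation with unsigned
arithmetic so the example itself has no UB; assumes 32-bit int):

  #include <assert.h>
  #include <limits.h>

  int main(void) {
    int x = INT_MIN, c = 2;
    int neg_x = (int)(0u - (unsigned)x);   /* -INT_MIN wraps to INT_MIN */
    assert(neg_x / c == -1073741824);      /* -X/C */
    assert(x / -c == 1073741824);          /* X/-C: a different result */
    return 0;
  }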
//===---------------------------------------------------------------------===//
This was noticed in the entryblock for grokdeclarator in 403.gcc:
%tmp = icmp eq i32 %decl_context, 4
%decl_context_addr.0 = select i1 %tmp, i32 3, i32 %decl_context
%tmp1 = icmp eq i32 %decl_context_addr.0, 1
%decl_context_addr.1 = select i1 %tmp1, i32 0, i32 %decl_context_addr.0
tmp1 should be simplified to something like (!tmp && decl_context == 1),
which further folds to just (decl_context == 1), since decl_context == 1
already implies !tmp. This allows recursive simplifications; tmp1 is used all
over the place in the function, e.g. by:
%tmp23 = icmp eq i32 %decl_context_addr.1, 0 ; <i1> [#uses=1]
%tmp24 = xor i1 %tmp1, true ; <i1> [#uses=1]
%or.cond8 = and i1 %tmp23, %tmp24 ; <i1> [#uses=1]
later.
//===---------------------------------------------------------------------===//
Store sinking: This code:
void f (int n, int *cond, int *res) {
int i;
*res = 0;
for (i = 0; i < n; i++)
if (*cond)
*res ^= 234; /* (*) */
}
On this function GVN hoists the fully redundant value of *res, but nothing
moves the store out. This gives us this code:
bb: ; preds = %bb2, %entry
%.rle = phi i32 [ 0, %entry ], [ %.rle6, %bb2 ]
%i.05 = phi i32 [ 0, %entry ], [ %indvar.next, %bb2 ]
%1 = load i32* %cond, align 4
%2 = icmp eq i32 %1, 0
br i1 %2, label %bb2, label %bb1
bb1: ; preds = %bb
%3 = xor i32 %.rle, 234
store i32 %3, i32* %res, align 4
br label %bb2
bb2: ; preds = %bb, %bb1
%.rle6 = phi i32 [ %3, %bb1 ], [ %.rle, %bb ]
%indvar.next = add i32 %i.05, 1
%exitcond = icmp eq i32 %indvar.next, %n
br i1 %exitcond, label %return, label %bb
DSE should sink partially dead stores to get the store out of the loop.
Here's another partial dead case:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12395
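What the sunk form would look like at the source level (a sketch; only legal
when *cond and *res are known not to alias, hence the restrict qualifiers):

  void f_sunk (int n, int * restrict cond, int * restrict res) {
    int i, tmp = 0;            /* carry *res in a register */
    for (i = 0; i < n; i++)
      if (*cond)
        tmp ^= 234;
    *res = tmp;                /* single store, sunk out of the loop */
  }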
//===---------------------------------------------------------------------===//
Scalar PRE hoists the mul in the common block up to the else:
int test (int a, int b, int c, int g) {
int d, e;
if (a)
d = b * c;
else
d = b - c;
e = b * c + g;
return d + e;
}
It would be better to do the mul once to reduce codesize above the if.
This is GCC PR38204.
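I.e., at the source level the preferred form is roughly (our sketch):

  int test (int a, int b, int c, int g) {
    int d, e;
    int bc = b * c;            /* single multiply, above the if */
    if (a)
      d = bc;
    else
      d = b - c;
    e = bc + g;
    return d + e;
  }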
//===---------------------------------------------------------------------===//
GCC PR37810 is an interesting case where we should sink load/store reload
into the if block and outside the loop, so we don't reload/store it on the
non-call path.
  for () {
    *P += 1;
    if ()
      call();
    else
      ...
  }
->
  tmp = *P;
  for () {
    tmp += 1;
    if () {
      *P = tmp;
      call();
      tmp = *P;
    } else ...
  }
  *P = tmp;
We now hoist the reload after the call (Transforms/GVN/lpre-call-wrap.ll), but
we don't sink the store. We need partially dead store sinking.
//===---------------------------------------------------------------------===//
[PHI TRANSLATE GEPs]
GCC PR37166: Sinking of loads prevents SROA'ing the "g" struct on the stack
leading to excess stack traffic. This could be handled by GVN with some crazy
symbolic phi translation. The code we get looks like (g is on the stack):
bb2: ; preds = %bb1
..
%9 = getelementptr %struct.f* %g, i32 0, i32 0
store i32 %8, i32* %9, align 4
br label %bb3
bb3: ; preds = %bb1, %bb2, %bb
%c_addr.0 = phi %struct.f* [ %g, %bb2 ], [ %c, %bb ], [ %c, %bb1 ]
%b_addr.0 = phi %struct.f* [ %b, %bb2 ], [ %g, %bb ], [ %b, %bb1 ]
%10 = getelementptr %struct.f* %c_addr.0, i32 0, i32 0
%11 = load i32* %10, align 4
%11 is fully redundant, and in BB2 it should have the value %8.
GCC PR33344 is a similar case.
//===---------------------------------------------------------------------===//
There are many load PRE testcases in testsuite/gcc.dg/tree-ssa/loadpre* in the
GCC testsuite, as well as many PRE testcases named ssa-pre-*.c.
//===---------------------------------------------------------------------===//
There are some interesting cases in testsuite/gcc.dg/tree-ssa/predcom-* in the
GCC testsuite. For example, predcom-1.c is:
for (i = 2; i < 1000; i++)
fib[i] = (fib[i-1] + fib[i - 2]) & 0xffff;
which compiles into:
bb1: ; preds = %bb1, %bb1.thread
%indvar = phi i32 [ 0, %bb1.thread ], [ %0, %bb1 ]
%i.0.reg2mem.0 = add i32 %indvar, 2
%0 = add i32 %indvar, 1 ; <i32> [#uses=3]
%1 = getelementptr [1000 x i32]* @fib, i32 0, i32 %0
%2 = load i32* %1, align 4 ; <i32> [#uses=1]
%3 = getelementptr [1000 x i32]* @fib, i32 0, i32 %indvar
%4 = load i32* %3, align 4 ; <i32> [#uses=1]
%5 = add i32 %4, %2 ; <i32> [#uses=1]
%6 = and i32 %5, 65535 ; <i32> [#uses=1]
%7 = getelementptr [1000 x i32]* @fib, i32 0, i32 %i.0.reg2mem.0
store i32 %6, i32* %7, align 4
%exitcond = icmp eq i32 %0, 998 ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb1
This is basically:
LOAD fib[i+1]
LOAD fib[i]
STORE fib[i+2]
Instead of handling this as a loop or other xform, all we'd need to do is teach
load PRE to phi translate the %0 add (i+1) into the predecessor as (i'+1+1) =
(i'+2) (where i' is the previous iteration of i). This would find the store
which feeds it.
predcom-2.c is apparently the same as predcom-1.c.
predcom-3.c is very similar but needs loads feeding each other instead of
store->load.
predcom-4.c seems the same as the rest.
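For reference, the scalarized result at the source level would be roughly
(our sketch):

  int prev2 = fib[0], prev1 = fib[1];   /* fib[i-2] and fib[i-1] */
  for (i = 2; i < 1000; i++) {
    int cur = (prev1 + prev2) & 0xffff;
    fib[i] = cur;                       /* the only memory access left */
    prev2 = prev1;
    prev1 = cur;
  }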
//===---------------------------------------------------------------------===//
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35287 [LPRE crit edge splitting]
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34677 (licm does this, LPRE crit edge)
llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as | opt -mem2reg -simplifycfg -gvn | llvm-dis
//===---------------------------------------------------------------------===//
Type based alias analysis:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14705
//===---------------------------------------------------------------------===//
When GVN/PRE finds a store of a float through a pointer that must-aliases a
load it expects to be an int*, it should turn the forwarded value into a
bitcast. This is a nice generalization of the SROA hack that would apply to
other cases, e.g.:
int foo(int C, int *P, float X) {
if (C) {
bar();
*P = 42;
} else
*(float*)P = X;
return *P;
}
One example (that requires crazy phi translation) is:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16799 [BITCAST PHI TRANS]
//===---------------------------------------------------------------------===//
A/B get pinned to the stack because we turn an if/then into a select instead
of PRE'ing the load/store. This may be fixable in instcombine:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37892
Interesting missed case because of control flow flattening (should be 2 loads):
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=26629
With: llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as |
opt -mem2reg -gvn -instcombine | llvm-dis
we miss it because we need 1) GEP PHI TRAN, 2) CRIT EDGE 3) MULTIPLE DIFFERENT
VALS PRODUCED BY ONE BLOCK OVER DIFFERENT PATHS
//===---------------------------------------------------------------------===//
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19633
We could eliminate the branch condition here, since loading from null is undefined:
struct S { int w, x, y, z; };
struct T { int r; struct S s; };
void bar (struct S, int);
void foo (int a, struct T b)
{
struct S *c = 0;
if (a)
c = &b.s;
bar (*c, a);
}
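Exploiting that (our sketch): bar(*c, a) dereferences c unconditionally, so
the a == 0 path would load from null; on any well-defined execution a is
nonzero and c is &b.s, so the whole body folds to:

  void foo (int a, struct T b)
  {
    bar (b.s, a);
  }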
//===---------------------------------------------------------------------===//
simplifylibcalls should do several optimizations for strspn/strcspn:
strcspn(x, "") -> strlen(x)
strcspn("", x) -> 0
strspn("", x) -> 0
strspn(x, "") -> strlen(x)
strspn(x, "a") -> strchr(x, 'a')-x
strcspn(x, "a") -> inlined loop for up to 3 letters (similarly for strspn):
size_t __strcspn_c3 (__const char *__s, int __reject1, int __reject2,
int __reject3) {
register size_t __result = 0;
while (__s[__result] != '\0' && __s[__result] != __reject1 &&
__s[__result] != __reject2 && __s[__result] != __reject3)
++__result;
return __result;
}
This should turn into a switch on the character. See PR3253 for some notes on
codegen.
456.hmmer apparently uses strcspn and strspn a lot. 471.omnetpp uses strspn.
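A sketch of the switch form, assuming the reject set is the known literal
"abc" (the helper name is ours):

  #include <stddef.h>

  /* Hypothetical expansion of strcspn(s, "abc"): one dispatch per
     character instead of four compares. */
  static size_t strcspn_abc (const char *s) {
    size_t n = 0;
    for (;; ++n) {
      switch (s[n]) {
      case '\0': case 'a': case 'b': case 'c':
        return n;          /* terminator or a reject character */
      default:
        break;             /* keep scanning */
      }
    }
  }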
//===---------------------------------------------------------------------===//
"gas" uses this idiom:
else if (strchr ("+-/*%|&^:[]()~", *intel_parser.op_string))
..
else if (strchr ("<>", *intel_parser.op_string)
Those should be turned into a switch.
//===---------------------------------------------------------------------===//
252.eon contains this interesting code:
%3072 = getelementptr [100 x i8]* %tempString, i32 0, i32 0
%3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind
%strlen = call i32 @strlen(i8* %3072) ; uses = 1
%endptr = getelementptr [100 x i8]* %tempString, i32 0, i32 %strlen
call void @llvm.memcpy.i32(i8* %endptr,
i8* getelementptr ([5 x i8]* @"\01LC42", i32 0, i32 0), i32 5, i32 1)
%3074 = call i32 @strlen(i8* %endptr) nounwind readonly
This is interesting for a couple reasons. First, in this:
%3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind
%strlen = call i32 @strlen(i8* %3072)
If the strcpy were strengthened to stpcpy, which returns a pointer to the end
of the string, the strlen could be replaced with "%strlen = sub %3073, %3072".
Based on that, the endptr GEP just becomes equal to %3073, which eliminates a
strlen call and a GEP.
Second, the strlen of %endptr after the memcpy can be replaced with:
%3074 = call i32 @strlen([5 x i8]* @"\01LC42") nounwind readonly
because the string @"\01LC42" was just copied into that buffer; this, in turn,
can be constant folded to "4".
In other code, it contains:
%endptr6978 = bitcast i8* %endptr69 to i32*
store i32 7107374, i32* %endptr6978, align 1
%3167 = call i32 @strlen(i8* %endptr69) nounwind readonly
Which could also be constant folded. Whatever is producing this should probably
be fixed to leave this as a memcpy from a string.
Further, eon also has an interesting partially redundant strlen call:
bb8: ; preds = %_ZN18eonImageCalculatorC1Ev.exit
%682 = getelementptr i8** %argv, i32 6 ; <i8**> [#uses=2]
%683 = load i8** %682, align 4 ; <i8*> [#uses=4]
%684 = load i8* %683, align 1 ; <i8> [#uses=1]
%685 = icmp eq i8 %684, 0 ; <i1> [#uses=1]
br i1 %685, label %bb10, label %bb9
bb9: ; preds = %bb8
%686 = call i32 @strlen(i8* %683) nounwind readonly
%687 = icmp ugt i32 %686, 254 ; <i1> [#uses=1]
br i1 %687, label %bb10, label %bb11
bb10: ; preds = %bb9, %bb8
%688 = call i32 @strlen(i8* %683) nounwind readonly
This could be eliminated by doing the strlen once in bb8, saving code size and
improving perf on the bb8->9->10 path.
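At the source level the fix is roughly (our sketch; the strlen value is
available on both edges into bb10, and is 0 on the bb8 edge because the first
byte is already known to be nul):

  const char *s = argv[6];
  size_t n = 0;                /* strlen(s) when *s == 0 (the bb8 edge) */
  if (*s != 0)
    n = strlen(s);             /* bb9: the one remaining call */
  if (*s == 0 || n > 254) {
    /* bb10: reuse n; no second strlen call */
  }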
//===---------------------------------------------------------------------===//