"llvm/lib/CodeGen/git@repo.hca.bsc.es:lalbano/llvm-bpevl.git" did not exist on "f18429372f12b571aef539855c4dbef23a96f494"
Newer
Older
Chris Lattner
committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
namespace {
class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer to a TargetLowering to consult for determining
/// transformation profitability.
const TargetLowering *TLI;
public:
CodeGenPrepare(const TargetLowering *tli = 0) : TLI(tli) {}
bool runOnFunction(Function &F);
private:
bool OptimizeBlock(BasicBlock &BB);
bool OptimizeGEPExpression(GetElementPtrInst *GEPI);
};
}
static RegisterPass<CodeGenPrepare> X("codegenprepare",
"Optimize for code generation");
FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
return new CodeGenPrepare(TLI);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
bool MadeChange = true;
bool EverMadeChange = false;
while (MadeChange) {
MadeChange = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
MadeChange |= OptimizeBlock(*BB);
EverMadeChange |= MadeChange;
}
return EverMadeChange;
}
/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen. We only do this if the successor has
/// phi nodes (otherwise critical edges are ok). If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
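// Illustrative sketch (hypothetical IR, not from the original source): suppose
// TIBB ends in "br i1 %c, label %Dest, label %Else" and %Dest begins with
//   %p = phi i32 [ 0, %TIBB ], [ 0, %Pred ], ...
// where %Pred is an otherwise-empty block ending in "br label %Dest".  Since
// %Pred already feeds %Dest the same PHI values that %TIBB would (0 here), we
// can simply retarget TIBB's edge to %Pred instead of creating a new block
// with SplitCriticalEdge.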
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
BasicBlock *TIBB = TI->getParent();
BasicBlock *Dest = TI->getSuccessor(SuccNum);
assert(isa<PHINode>(Dest->begin()) &&
"This should only be called if Dest has a PHI!");
/// TIPHIValues - This array is lazily computed to determine the values of
/// PHIs in Dest that TI would provide.
std::vector<Value*> TIPHIValues;
// Check to see if Dest has any blocks that can be used as a split edge for
// this terminator.
for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
BasicBlock *Pred = *PI;
// To be usable, the pred has to end with an uncond branch to the dest.
BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
if (!PredBr || !PredBr->isUnconditional() ||
// Must be empty other than the branch.
&Pred->front() != PredBr)
continue;
// Finally, since we know that Dest has phi nodes in it, we have to make
// sure that jumping to Pred will have the same effect as going to Dest in
// terms of PHI values.
PHINode *PN;
unsigned PHINo = 0;
bool FoundMatch = true;
for (BasicBlock::iterator I = Dest->begin();
(PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
if (PHINo == TIPHIValues.size())
TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
// If the PHI entry doesn't work, we can't use this pred.
if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
FoundMatch = false;
break;
}
}
// If we found a workable predecessor, retarget TI's edge to branch to Pred.
if (FoundMatch) {
Dest->removePredecessor(TIBB);
TI->setSuccessor(SuccNum, Pred);
return;
}
}
SplitCriticalEdge(TI, SuccNum, P, true);
}
/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
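// Rough sketch of the emitted sequence (hypothetical IR, not from the original
// source): given a base pointer already converted to the target's integer
// pointer type and a constant PtrOffset, this emits roughly
//   %sum  = add <intptr ty> %Ptr, PtrOffset
//   %addr = inttoptr <intptr ty> %sum to <GEPI's pointer type>
// either right after GEPI (when BB is GEPI's block) or after BB's PHI nodes.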
static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
Instruction *GEPI, Value *Ptr,
Value *PtrOffset) {
if (V) return V; // Already computed.
// Figure out the insertion point
BasicBlock::iterator InsertPt;
if (BB == GEPI->getParent()) {
// If GEP is already inserted into BB, insert right after the GEP.
InsertPt = GEPI;
++InsertPt;
} else {
// Otherwise, insert at the top of BB, after any PHI nodes
InsertPt = BB->begin();
while (isa<PHINode>(InsertPt)) ++InsertPt;
}
// If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
// BB so that there is only one value live across basic blocks (the cast
// operand).
if (CastInst *CI = dyn_cast<CastInst>(Ptr))
if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
Ptr = CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
"", InsertPt);
// Add the offset, cast it to the right type.
Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
// Ptr is an integer type, GEPI is pointer type ==> IntToPtr
return V = CastInst::create(Instruction::IntToPtr, Ptr, GEPI->getType(),
"", InsertPt);
}
/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
/// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
/// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
/// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
/// sink PtrOffset into user blocks where doing so will likely allow us to fold
/// the constant add into a load or store instruction. Additionally, if a user
/// is a pointer-pointer cast, we look through it to find its users.
static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
Constant *PtrOffset, BasicBlock *DefBB,
GetElementPtrInst *GEPI,
std::map<BasicBlock*,Instruction*> &InsertedExprs) {
while (!RepPtr->use_empty()) {
Instruction *User = cast<Instruction>(RepPtr->use_back());
// If the user is a Pointer-Pointer cast, recurse. Only BitCast can be
// used for a Pointer-Pointer cast.
if (isa<BitCastInst>(User)) {
ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
// Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
// could invalidate an iterator.
User->setOperand(0, UndefValue::get(RepPtr->getType()));
continue;
}
// If this is a load of the pointer, or a store through the pointer, emit
// the increment into the load/store block.
Instruction *NewVal;
if (isa<LoadInst>(User) ||
(isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
User->getParent(), GEPI,
Ptr, PtrOffset);
} else {
// If this use is not foldable into the addressing mode, use a version
// emitted in the GEP block.
NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
Ptr, PtrOffset);
}
if (GEPI->getType() != RepPtr->getType()) {
BasicBlock::iterator IP = NewVal;
++IP;
// NewVal has GEPI's pointer type; RepPtr's type differs, so a BitCast suffices.
NewVal = new BitCastInst(NewVal, RepPtr->getType(), "", IP);
}
User->replaceUsesOfWith(RepPtr, NewVal);
}
}
/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
/// selection, we want to be a bit careful about some things. In particular, if
/// we have a GEP instruction that is used in a different block than it is
/// defined, the addressing expression of the GEP cannot be folded into loads or
/// stores that use it. In this case, decompose the GEP and move constant
/// indices into blocks that use it.
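// Illustrative before/after (hypothetical types and offsets, not from the
// original source):
//   BB1:  %g = getelementptr %struct.S* %P, i32 0, i32 2    ; field offset 8
//   BB2:  %v = load i32* %g
// becomes, roughly:
//   BB1:  %base = ptrtoint %struct.S* %P to i64
//   BB2:  %sum  = add i64 %base, 8
//         %addr = inttoptr i64 %sum to i32*
//         %v    = load i32* %addr
// so the "+8" can be folded into the load's addressing mode during selection.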
bool CodeGenPrepare::OptimizeGEPExpression(GetElementPtrInst *GEPI) {
// If this GEP is only used inside the block it is defined in, there is no
// need to rewrite it.
bool isUsedOutsideDefBB = false;
BasicBlock *DefBB = GEPI->getParent();
for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
UI != E; ++UI) {
if (cast<Instruction>(*UI)->getParent() != DefBB) {
isUsedOutsideDefBB = true;
break;
}
}
if (!isUsedOutsideDefBB) return false;
// If this GEP has no non-zero constant indices, there is nothing we can do;
// ignore it.
bool hasConstantIndex = false;
bool hasVariableIndex = false;
for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
E = GEPI->op_end(); OI != E; ++OI) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
if (!CI->isZero()) {
hasConstantIndex = true;
break;
}
} else {
hasVariableIndex = true;
}
}
// If this is a "GEP X, 0, 0, 0", turn this into a cast.
if (!hasConstantIndex && !hasVariableIndex) {
// The GEP operand must be a pointer, and so must its result -> BitCast
Value *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
GEPI->getName(), GEPI);
GEPI->replaceAllUsesWith(NC);
GEPI->eraseFromParent();
return true;
}
// If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
return false;
// If we don't have target lowering info, we can't lower the GEP.
if (!TLI) return false;
const TargetData *TD = TLI->getTargetData();
// Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
// constant offset (which we now know is non-zero) and deal with it later.
uint64_t ConstantOffset = 0;
const Type *UIntPtrTy = TD->getIntPtrType();
Value *Ptr = new PtrToIntInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
const Type *Ty = GEPI->getOperand(0)->getType();
for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
E = GEPI->op_end(); OI != E; ++OI) {
Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field)
ConstantOffset += TD->getStructLayout(StTy)->getElementOffset(Field);
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
// Handle constant subscripts.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
continue;
}
// Ptr = Ptr + Idx * ElementSize;
// Cast Idx to UIntPtrTy if needed.
Idx = CastInst::createIntegerCast(Idx, UIntPtrTy, true/*SExt*/, "", GEPI);
uint64_t ElementSize = TD->getTypeSize(Ty);
// Mask the element size down to the pointer width so it fits in UIntPtrTy.
ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);
// Multiply by the element size and add to the base.
Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
}
}
// Make sure that the offset fits in uintptr_t.
ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);
// Okay, we have now emitted all of the variable index parts to the BB that
// the GEP is defined in. Loop over all of the using instructions, inserting
// an "add Ptr, ConstantOffset" into each block that uses it and update the
// instruction to use the newly computed value, making GEPI dead. When the
// user is a load or store instruction address, we emit the add into the user
// block, otherwise we use a canonical version right next to the gep (these
// won't be foldable as addresses, so we might as well share the computation).
std::map<BasicBlock*,Instruction*> InsertedExprs;
ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
// Finally, the GEP is dead, remove it.
GEPI->eraseFromParent();
return true;
}
/// SinkInvariantGEPIndex - If a GEP instruction has a variable index that has
/// been hoisted out of the loop by the LICM pass, sink it back into the use BB
/// if it can be determined that the index computation can be folded into the
/// addressing mode of the load / store uses.
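// Illustrative sketch (hypothetical values, not from the original source): if
// LICM hoisted "%idx = add i32 %X, 42" out of a loop and the loop body
// contains "load (gep i32* %P, i32 %idx)", the add is re-materialized in the
// GEP's block whenever the target reports that (42 * 4) is a legal address
// immediate and 4 is a legal scale, so the whole index computation can fold
// into the load's addressing mode.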
static bool SinkInvariantGEPIndex(BinaryOperator *BinOp,
const TargetLowering &TLI) {
// Only look at Add.
if (BinOp->getOpcode() != Instruction::Add)
return false;
// DestBBs - These are the blocks where a copy of BinOp will be inserted.
SmallSet<BasicBlock*, 8> DestBBs;
BasicBlock *DefBB = BinOp->getParent();
bool MadeChange = false;
for (Value::use_iterator UI = BinOp->use_begin(), E = BinOp->use_end();
UI != E; ++UI) {
Instruction *GEPI = cast<Instruction>(*UI);
// Only look for GEP use in another block.
if (GEPI->getParent() == DefBB) continue;
if (isa<GetElementPtrInst>(GEPI)) {
// If the GEP has another variable index, abandon.
bool hasVariableIndex = false;
for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
OE = GEPI->op_end(); OI != OE; ++OI)
if (*OI != BinOp && !isa<ConstantInt>(*OI)) {
hasVariableIndex = true;
break;
}
if (hasVariableIndex)
break;
BasicBlock *GEPIBB = GEPI->getParent();
for (Value::use_iterator UUI = GEPI->use_begin(), UE = GEPI->use_end();
UUI != UE; ++UUI) {
Instruction *GEPIUser = cast<Instruction>(*UUI);
const Type *UseTy = NULL;
if (LoadInst *Load = dyn_cast<LoadInst>(GEPIUser))
UseTy = Load->getType();
else if (StoreInst *Store = dyn_cast<StoreInst>(GEPIUser))
UseTy = Store->getOperand(0)->getType();
// Check if it is possible to fold the expression into the addressing mode.
if (UseTy && isa<ConstantInt>(BinOp->getOperand(1))) {
uint64_t Scale = TLI.getTargetData()->getTypeSize(UseTy);
int64_t Cst = cast<ConstantInt>(BinOp->getOperand(1))->getSExtValue();
// e.g. load (gep i32 * %P, (X+42)) => load (%P + X*4 + 168).
if (TLI.isLegalAddressImmediate(Cst*Scale, UseTy) &&
(Scale == 1 || TLI.isLegalAddressScale(Scale, UseTy))) {
DestBBs.insert(GEPIBB);
MadeChange = true;
break;
}
}
}
}
}
// Nothing to do.
if (!MadeChange)
return false;
/// InsertedOps - Only insert a duplicate in each block once.
std::map<BasicBlock*, BinaryOperator*> InsertedOps;
for (Value::use_iterator UI = BinOp->use_begin(), E = BinOp->use_end();
UI != E; ) {
Instruction *User = cast<Instruction>(*UI);
BasicBlock *UserBB = User->getParent();
// Preincrement use iterator so we don't invalidate it.
++UI;
// If any user in this BB wants it, replace all the uses in the BB.
if (DestBBs.count(UserBB)) {
// Sink it into user block.
BinaryOperator *&InsertedOp = InsertedOps[UserBB];
if (!InsertedOp) {
BasicBlock::iterator InsertPt = UserBB->begin();
while (isa<PHINode>(InsertPt)) ++InsertPt;
InsertedOp =
BinaryOperator::create(BinOp->getOpcode(), BinOp->getOperand(0),
BinOp->getOperand(1), "", InsertPt);
}
User->replaceUsesOfWith(BinOp, InsertedOp);
}
}
if (BinOp->use_empty())
BinOp->eraseFromParent();
return true;
}
/// OptimizeNoopCopyExpression - We have determined that the specified cast
/// instruction is a noop copy (e.g. it's casting from one pointer type to
/// another, int->uint, or int->sbyte on PPC).
///
/// Return true if any changes are made.
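// Illustrative sketch (hypothetical blocks, not from the original source): if
// BB1 contains "%c = bitcast i8* %p to i32*" and %c is only used by a load in
// BB2, a fresh copy of the cast is inserted at the top of BB2 (after any PHIs)
// and BB2's use is rewritten to it.  The value live across the blocks is then
// %p rather than %c, so no extra virtual register has to be coalesced for the
// no-op cast.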
static bool OptimizeNoopCopyExpression(CastInst *CI) {
BasicBlock *DefBB = CI->getParent();
/// InsertedCasts - Only insert a cast in each block once.
std::map<BasicBlock*, CastInst*> InsertedCasts;
bool MadeChange = false;
for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
UI != E; ) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
// Figure out which BB this cast is used in. For PHIs this is the
// appropriate predecessor block.
BasicBlock *UserBB = User->getParent();
if (PHINode *PN = dyn_cast<PHINode>(User)) {
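// In this revision a PHI's operand list interleaves (value, block) pairs, so
// halving the operand number recovers the index of the incoming block.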
unsigned OpVal = UI.getOperandNo()/2;
UserBB = PN->getIncomingBlock(OpVal);
}
// Preincrement use iterator so we don't invalidate it.
++UI;
// If this user is in the same block as the cast, don't change the cast.
if (UserBB == DefBB) continue;
// If we have already inserted a cast into this block, use it.
CastInst *&InsertedCast = InsertedCasts[UserBB];
if (!InsertedCast) {
BasicBlock::iterator InsertPt = UserBB->begin();
while (isa<PHINode>(InsertPt)) ++InsertPt;
InsertedCast =
CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
InsertPt);
MadeChange = true;
}
// Replace a use of the cast with a use of the new cast.
TheUse = InsertedCast;
}
// If we removed all uses, nuke the cast.
if (CI->use_empty())
CI->eraseFromParent();
return MadeChange;
}
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
bool MadeChange = false;
// Split all critical edges where the destination block has a PHI node.
TerminatorInst *BBTI = BB.getTerminator();
if (BBTI->getNumSuccessors() > 1) {
for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
isCriticalEdge(BBTI, i, true))
SplitEdgeNicely(BBTI, i, this);
}
for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
Instruction *I = BBI++;
if (CallInst *CI = dyn_cast<CallInst>(I)) {
// If we found an inline asm expression, and if the target knows how to
// lower it to normal LLVM code, do so now.
if (TLI && isa<InlineAsm>(CI->getCalledValue()))
if (const TargetAsmInfo *TAI =
TLI->getTargetMachine().getTargetAsmInfo()) {
if (TAI->ExpandInlineAsm(CI))
BBI = BB.begin();
}
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
MadeChange |= OptimizeGEPExpression(GEPI);
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
// If the source of the cast is a constant, then this should have
// already been constant folded. The only reason NOT to constant fold
// it is if something (e.g. LSR) was careful to place the constant
// evaluation in a block other than the one that uses it (e.g. to hoist
// the address of globals out of a loop). If this is the case, we don't
// want to forward-subst the cast.
if (isa<Constant>(CI->getOperand(0)))
continue;
if (!TLI) continue;
// If this is a noop copy, sink it into user blocks to reduce the number
// of virtual registers that must be created and coalesced.
MVT::ValueType SrcVT = TLI->getValueType(CI->getOperand(0)->getType());
MVT::ValueType DstVT = TLI->getValueType(CI->getType());
// If this is an fp<->int conversion, it is not a noop copy.
if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
continue;
// If this is an extension, it will be a zero or sign extension, which
// isn't a noop.
if (SrcVT < DstVT) continue;
// If these values will be promoted, find out what they will be promoted
// to. This helps us consider truncates on PPC as noop copies when they
// are.
if (TLI->getTypeAction(SrcVT) == TargetLowering::Promote)
SrcVT = TLI->getTypeToTransformTo(SrcVT);
if (TLI->getTypeAction(DstVT) == TargetLowering::Promote)
DstVT = TLI->getTypeToTransformTo(DstVT);
// If, after promotion, these are the same types, this is a noop copy.
if (SrcVT == DstVT)
MadeChange |= OptimizeNoopCopyExpression(CI);
} else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I)) {
if (TLI)
MadeChange |= SinkInvariantGEPIndex(BinOp, *TLI);
}
}
return MadeChange;
}