"git@repo.hca.bsc.es:lalbano/llvm-bpevl.git" did not exist on "d31a3d0e82069570bf9ba7ce6ce90dafa06f68be"
Newer
Older
//===-- X86InstrInfo.cpp - X86 Instruction Information ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
namespace {
cl::opt<bool>
NoFusing("disable-spill-fusing",
cl::desc("Disable fusing of spill code into instructions"));
cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
cl::desc("Print instructions that the allocator wants to"
" fuse, but the X86 backend currently can't"),
cl::Hidden);
cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
}
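// The constructor below builds the opcode folding tables. The
// RegOp2MemOpTable{2Addr,0,1,2} maps take a register-form opcode to the
// memory-form opcode used when a stack slot is folded into the tied
// two-address operand, operand 0, operand 1, or operand 2 respectively,
// and MemOp2RegOpTable maps back for unfolding. The AuxInfo value stored
// with each unfolding entry packs the folded operand index in its low bits
// and marks a folded load in bit 4 and a folded store in bit 5 (see the
// AuxInfo computations in the loops below).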
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
SmallVector<unsigned,16> AmbEntries;
static const unsigned OpTbl2Addr[][2] = {
{ X86::ADC32ri, X86::ADC32mi },
{ X86::ADC32ri8, X86::ADC32mi8 },
{ X86::ADC32rr, X86::ADC32mr },
{ X86::ADC64ri32, X86::ADC64mi32 },
{ X86::ADC64ri8, X86::ADC64mi8 },
{ X86::ADC64rr, X86::ADC64mr },
{ X86::ADD16ri, X86::ADD16mi },
{ X86::ADD16ri8, X86::ADD16mi8 },
{ X86::ADD16rr, X86::ADD16mr },
{ X86::ADD32ri, X86::ADD32mi },
{ X86::ADD32ri8, X86::ADD32mi8 },
{ X86::ADD32rr, X86::ADD32mr },
{ X86::ADD64ri32, X86::ADD64mi32 },
{ X86::ADD64ri8, X86::ADD64mi8 },
{ X86::ADD64rr, X86::ADD64mr },
{ X86::ADD8ri, X86::ADD8mi },
{ X86::ADD8rr, X86::ADD8mr },
{ X86::AND16ri, X86::AND16mi },
{ X86::AND16ri8, X86::AND16mi8 },
{ X86::AND16rr, X86::AND16mr },
{ X86::AND32ri, X86::AND32mi },
{ X86::AND32ri8, X86::AND32mi8 },
{ X86::AND32rr, X86::AND32mr },
{ X86::AND64ri32, X86::AND64mi32 },
{ X86::AND64ri8, X86::AND64mi8 },
{ X86::AND64rr, X86::AND64mr },
{ X86::AND8ri, X86::AND8mi },
{ X86::AND8rr, X86::AND8mr },
{ X86::DEC16r, X86::DEC16m },
{ X86::DEC32r, X86::DEC32m },
{ X86::DEC64_16r, X86::DEC64_16m },
{ X86::DEC64_32r, X86::DEC64_32m },
{ X86::DEC64r, X86::DEC64m },
{ X86::DEC8r, X86::DEC8m },
{ X86::INC16r, X86::INC16m },
{ X86::INC32r, X86::INC32m },
{ X86::INC64_16r, X86::INC64_16m },
{ X86::INC64_32r, X86::INC64_32m },
{ X86::INC64r, X86::INC64m },
{ X86::INC8r, X86::INC8m },
{ X86::NEG16r, X86::NEG16m },
{ X86::NEG32r, X86::NEG32m },
{ X86::NEG64r, X86::NEG64m },
{ X86::NEG8r, X86::NEG8m },
{ X86::NOT16r, X86::NOT16m },
{ X86::NOT32r, X86::NOT32m },
{ X86::NOT64r, X86::NOT64m },
{ X86::NOT8r, X86::NOT8m },
{ X86::OR16ri, X86::OR16mi },
{ X86::OR16ri8, X86::OR16mi8 },
{ X86::OR16rr, X86::OR16mr },
{ X86::OR32ri, X86::OR32mi },
{ X86::OR32ri8, X86::OR32mi8 },
{ X86::OR32rr, X86::OR32mr },
{ X86::OR64ri32, X86::OR64mi32 },
{ X86::OR64ri8, X86::OR64mi8 },
{ X86::OR64rr, X86::OR64mr },
{ X86::OR8ri, X86::OR8mi },
{ X86::OR8rr, X86::OR8mr },
{ X86::ROL16r1, X86::ROL16m1 },
{ X86::ROL16rCL, X86::ROL16mCL },
{ X86::ROL16ri, X86::ROL16mi },
{ X86::ROL32r1, X86::ROL32m1 },
{ X86::ROL32rCL, X86::ROL32mCL },
{ X86::ROL32ri, X86::ROL32mi },
{ X86::ROL64r1, X86::ROL64m1 },
{ X86::ROL64rCL, X86::ROL64mCL },
{ X86::ROL64ri, X86::ROL64mi },
{ X86::ROL8r1, X86::ROL8m1 },
{ X86::ROL8rCL, X86::ROL8mCL },
{ X86::ROL8ri, X86::ROL8mi },
{ X86::ROR16r1, X86::ROR16m1 },
{ X86::ROR16rCL, X86::ROR16mCL },
{ X86::ROR16ri, X86::ROR16mi },
{ X86::ROR32r1, X86::ROR32m1 },
{ X86::ROR32rCL, X86::ROR32mCL },
{ X86::ROR32ri, X86::ROR32mi },
{ X86::ROR64r1, X86::ROR64m1 },
{ X86::ROR64rCL, X86::ROR64mCL },
{ X86::ROR64ri, X86::ROR64mi },
{ X86::ROR8r1, X86::ROR8m1 },
{ X86::ROR8rCL, X86::ROR8mCL },
{ X86::ROR8ri, X86::ROR8mi },
{ X86::SAR16r1, X86::SAR16m1 },
{ X86::SAR16rCL, X86::SAR16mCL },
{ X86::SAR16ri, X86::SAR16mi },
{ X86::SAR32r1, X86::SAR32m1 },
{ X86::SAR32rCL, X86::SAR32mCL },
{ X86::SAR32ri, X86::SAR32mi },
{ X86::SAR64r1, X86::SAR64m1 },
{ X86::SAR64rCL, X86::SAR64mCL },
{ X86::SAR64ri, X86::SAR64mi },
{ X86::SAR8r1, X86::SAR8m1 },
{ X86::SAR8rCL, X86::SAR8mCL },
{ X86::SAR8ri, X86::SAR8mi },
{ X86::SBB32ri, X86::SBB32mi },
{ X86::SBB32ri8, X86::SBB32mi8 },
{ X86::SBB32rr, X86::SBB32mr },
{ X86::SBB64ri32, X86::SBB64mi32 },
{ X86::SBB64ri8, X86::SBB64mi8 },
{ X86::SBB64rr, X86::SBB64mr },
{ X86::SHL16rCL, X86::SHL16mCL },
{ X86::SHL16ri, X86::SHL16mi },
{ X86::SHL32rCL, X86::SHL32mCL },
{ X86::SHL32ri, X86::SHL32mi },
{ X86::SHL64rCL, X86::SHL64mCL },
{ X86::SHL64ri, X86::SHL64mi },
{ X86::SHL8rCL, X86::SHL8mCL },
{ X86::SHL8ri, X86::SHL8mi },
{ X86::SHLD16rrCL, X86::SHLD16mrCL },
{ X86::SHLD16rri8, X86::SHLD16mri8 },
{ X86::SHLD32rrCL, X86::SHLD32mrCL },
{ X86::SHLD32rri8, X86::SHLD32mri8 },
{ X86::SHLD64rrCL, X86::SHLD64mrCL },
{ X86::SHLD64rri8, X86::SHLD64mri8 },
{ X86::SHR16r1, X86::SHR16m1 },
{ X86::SHR16rCL, X86::SHR16mCL },
{ X86::SHR16ri, X86::SHR16mi },
{ X86::SHR32r1, X86::SHR32m1 },
{ X86::SHR32rCL, X86::SHR32mCL },
{ X86::SHR32ri, X86::SHR32mi },
{ X86::SHR64r1, X86::SHR64m1 },
{ X86::SHR64rCL, X86::SHR64mCL },
{ X86::SHR64ri, X86::SHR64mi },
{ X86::SHR8r1, X86::SHR8m1 },
{ X86::SHR8rCL, X86::SHR8mCL },
{ X86::SHR8ri, X86::SHR8mi },
{ X86::SHRD16rrCL, X86::SHRD16mrCL },
{ X86::SHRD16rri8, X86::SHRD16mri8 },
{ X86::SHRD32rrCL, X86::SHRD32mrCL },
{ X86::SHRD32rri8, X86::SHRD32mri8 },
{ X86::SHRD64rrCL, X86::SHRD64mrCL },
{ X86::SHRD64rri8, X86::SHRD64mri8 },
{ X86::SUB16ri, X86::SUB16mi },
{ X86::SUB16ri8, X86::SUB16mi8 },
{ X86::SUB16rr, X86::SUB16mr },
{ X86::SUB32ri, X86::SUB32mi },
{ X86::SUB32ri8, X86::SUB32mi8 },
{ X86::SUB32rr, X86::SUB32mr },
{ X86::SUB64ri32, X86::SUB64mi32 },
{ X86::SUB64ri8, X86::SUB64mi8 },
{ X86::SUB64rr, X86::SUB64mr },
{ X86::SUB8ri, X86::SUB8mi },
{ X86::SUB8rr, X86::SUB8mr },
{ X86::XOR16ri, X86::XOR16mi },
{ X86::XOR16ri8, X86::XOR16mi8 },
{ X86::XOR16rr, X86::XOR16mr },
{ X86::XOR32ri, X86::XOR32mi },
{ X86::XOR32ri8, X86::XOR32mi8 },
{ X86::XOR32rr, X86::XOR32mr },
{ X86::XOR64ri32, X86::XOR64mi32 },
{ X86::XOR64ri8, X86::XOR64mi8 },
{ X86::XOR64rr, X86::XOR64mr },
{ X86::XOR8ri, X86::XOR8mi },
{ X86::XOR8rr, X86::XOR8mr }
};
for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
unsigned RegOp = OpTbl2Addr[i][0];
unsigned MemOp = OpTbl2Addr[i][1];
if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
MemOp)).second)
assert(false && "Duplicated entries?");
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
std::make_pair(RegOp,
AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
// If the third value is 1, then it's folding either a load or a store.
static const unsigned OpTbl0[][3] = {
{ X86::BT16ri8, X86::BT16mi8, 1 },
{ X86::BT32ri8, X86::BT32mi8, 1 },
{ X86::BT64ri8, X86::BT64mi8, 1 },
{ X86::CALL32r, X86::CALL32m, 1 },
{ X86::CALL64r, X86::CALL64m, 1 },
{ X86::CMP16ri, X86::CMP16mi, 1 },
{ X86::CMP16ri8, X86::CMP16mi8, 1 },
{ X86::CMP16rr, X86::CMP16mr, 1 },
{ X86::CMP32ri, X86::CMP32mi, 1 },
{ X86::CMP32ri8, X86::CMP32mi8, 1 },
{ X86::CMP32rr, X86::CMP32mr, 1 },
{ X86::CMP64ri32, X86::CMP64mi32, 1 },
{ X86::CMP64ri8, X86::CMP64mi8, 1 },
{ X86::CMP64rr, X86::CMP64mr, 1 },
{ X86::CMP8ri, X86::CMP8mi, 1 },
{ X86::CMP8rr, X86::CMP8mr, 1 },
{ X86::DIV16r, X86::DIV16m, 1 },
{ X86::DIV32r, X86::DIV32m, 1 },
{ X86::DIV64r, X86::DIV64m, 1 },
{ X86::DIV8r, X86::DIV8m, 1 },
{ X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0 },
{ X86::FsMOVAPDrr, X86::MOVSDmr, 0 },
{ X86::FsMOVAPSrr, X86::MOVSSmr, 0 },
{ X86::IDIV16r, X86::IDIV16m, 1 },
{ X86::IDIV32r, X86::IDIV32m, 1 },
{ X86::IDIV64r, X86::IDIV64m, 1 },
{ X86::IDIV8r, X86::IDIV8m, 1 },
{ X86::IMUL16r, X86::IMUL16m, 1 },
{ X86::IMUL32r, X86::IMUL32m, 1 },
{ X86::IMUL64r, X86::IMUL64m, 1 },
{ X86::IMUL8r, X86::IMUL8m, 1 },
{ X86::JMP32r, X86::JMP32m, 1 },
{ X86::JMP64r, X86::JMP64m, 1 },
{ X86::MOV16ri, X86::MOV16mi, 0 },
{ X86::MOV16rr, X86::MOV16mr, 0 },
{ X86::MOV32ri, X86::MOV32mi, 0 },
{ X86::MOV32rr, X86::MOV32mr, 0 },
{ X86::MOV64ri32, X86::MOV64mi32, 0 },
{ X86::MOV64rr, X86::MOV64mr, 0 },
{ X86::MOV8ri, X86::MOV8mi, 0 },
{ X86::MOV8rr, X86::MOV8mr, 0 },
{ X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, 0 },
{ X86::MOVAPDrr, X86::MOVAPDmr, 0 },
{ X86::MOVAPSrr, X86::MOVAPSmr, 0 },
{ X86::MOVDQArr, X86::MOVDQAmr, 0 },
{ X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
{ X86::MOVPQIto64rr,X86::MOVPQI2QImr, 0 },
{ X86::MOVPS2SSrr, X86::MOVPS2SSmr, 0 },
{ X86::MOVSDrr, X86::MOVSDmr, 0 },
{ X86::MOVSDto64rr, X86::MOVSDto64mr, 0 },
{ X86::MOVSS2DIrr, X86::MOVSS2DImr, 0 },
{ X86::MOVSSrr, X86::MOVSSmr, 0 },
{ X86::MOVUPDrr, X86::MOVUPDmr, 0 },
{ X86::MOVUPSrr, X86::MOVUPSmr, 0 },
{ X86::MUL16r, X86::MUL16m, 1 },
{ X86::MUL32r, X86::MUL32m, 1 },
{ X86::MUL64r, X86::MUL64m, 1 },
{ X86::MUL8r, X86::MUL8m, 1 },
{ X86::SETAEr, X86::SETAEm, 0 },
{ X86::SETAr, X86::SETAm, 0 },
{ X86::SETBEr, X86::SETBEm, 0 },
{ X86::SETBr, X86::SETBm, 0 },
{ X86::SETEr, X86::SETEm, 0 },
{ X86::SETGEr, X86::SETGEm, 0 },
{ X86::SETGr, X86::SETGm, 0 },
{ X86::SETLEr, X86::SETLEm, 0 },
{ X86::SETLr, X86::SETLm, 0 },
{ X86::SETNEr, X86::SETNEm, 0 },
{ X86::SETNOr, X86::SETNOm, 0 },
{ X86::SETNPr, X86::SETNPm, 0 },
{ X86::SETNSr, X86::SETNSm, 0 },
{ X86::SETOr, X86::SETOm, 0 },
{ X86::SETPr, X86::SETPm, 0 },
{ X86::SETSr, X86::SETSm, 0 },
{ X86::TAILJMPr, X86::TAILJMPm, 1 },
{ X86::TEST16ri, X86::TEST16mi, 1 },
{ X86::TEST32ri, X86::TEST32mi, 1 },
{ X86::TEST64ri32, X86::TEST64mi32, 1 },
{ X86::TEST8ri, X86::TEST8mi, 1 }
};
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
unsigned RegOp = OpTbl0[i][0];
unsigned MemOp = OpTbl0[i][1];
if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
MemOp)).second)
assert(false && "Duplicated entries?");
unsigned FoldedLoad = OpTbl0[i][2];
// Index 0, folded load or store.
unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
static const unsigned OpTbl1[][2] = {
{ X86::CMP16rr, X86::CMP16rm },
{ X86::CMP32rr, X86::CMP32rm },
{ X86::CMP64rr, X86::CMP64rm },
{ X86::CMP8rr, X86::CMP8rm },
{ X86::CVTSD2SSrr, X86::CVTSD2SSrm },
{ X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
{ X86::CVTSI2SDrr, X86::CVTSI2SDrm },
{ X86::CVTSI2SS64rr, X86::CVTSI2SS64rm },
{ X86::CVTSI2SSrr, X86::CVTSI2SSrm },
{ X86::CVTSS2SDrr, X86::CVTSS2SDrm },
{ X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm },
{ X86::CVTTSD2SIrr, X86::CVTTSD2SIrm },
{ X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm },
{ X86::CVTTSS2SIrr, X86::CVTTSS2SIrm },
{ X86::FsMOVAPDrr, X86::MOVSDrm },
{ X86::FsMOVAPSrr, X86::MOVSSrm },
{ X86::IMUL16rri, X86::IMUL16rmi },
{ X86::IMUL16rri8, X86::IMUL16rmi8 },
{ X86::IMUL32rri, X86::IMUL32rmi },
{ X86::IMUL32rri8, X86::IMUL32rmi8 },
{ X86::IMUL64rri32, X86::IMUL64rmi32 },
{ X86::IMUL64rri8, X86::IMUL64rmi8 },
{ X86::Int_CMPSDrr, X86::Int_CMPSDrm },
{ X86::Int_CMPSSrr, X86::Int_CMPSSrm },
{ X86::Int_COMISDrr, X86::Int_COMISDrm },
{ X86::Int_COMISSrr, X86::Int_COMISSrm },
{ X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
{ X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm },
{ X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm },
{ X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm },
{ X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm },
{ X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm },
{ X86::Int_CVTSD2SI64rr,X86::Int_CVTSD2SI64rm },
{ X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm },
{ X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm },
{ X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm },
{ X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm },
{ X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm },
{ X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm },
{ X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm },
{ X86::Int_CVTSS2SI64rr,X86::Int_CVTSS2SI64rm },
{ X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm },
{ X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
{ X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
{ X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm },
{ X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
{ X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm },
{ X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
{ X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
{ X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
{ X86::MOV16rr, X86::MOV16rm },
{ X86::MOV32rr, X86::MOV32rm },
{ X86::MOV64rr, X86::MOV64rm },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm },
{ X86::MOV64toSDrr, X86::MOV64toSDrm },
{ X86::MOV8rr, X86::MOV8rm },
{ X86::MOVAPDrr, X86::MOVAPDrm },
{ X86::MOVAPSrr, X86::MOVAPSrm },
{ X86::MOVDDUPrr, X86::MOVDDUPrm },
{ X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
{ X86::MOVDI2SSrr, X86::MOVDI2SSrm },
{ X86::MOVDQArr, X86::MOVDQArm },
{ X86::MOVSD2PDrr, X86::MOVSD2PDrm },
{ X86::MOVSDrr, X86::MOVSDrm },
{ X86::MOVSHDUPrr, X86::MOVSHDUPrm },
{ X86::MOVSLDUPrr, X86::MOVSLDUPrm },
{ X86::MOVSS2PSrr, X86::MOVSS2PSrm },
{ X86::MOVSSrr, X86::MOVSSrm },
{ X86::MOVSX16rr8, X86::MOVSX16rm8 },
{ X86::MOVSX32rr16, X86::MOVSX32rm16 },
{ X86::MOVSX32rr8, X86::MOVSX32rm8 },
{ X86::MOVSX64rr16, X86::MOVSX64rm16 },
{ X86::MOVSX64rr32, X86::MOVSX64rm32 },
{ X86::MOVSX64rr8, X86::MOVSX64rm8 },
{ X86::MOVUPDrr, X86::MOVUPDrm },
{ X86::MOVUPSrr, X86::MOVUPSrm },
{ X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm },
{ X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm },
{ X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
{ X86::MOVZX16rr8, X86::MOVZX16rm8 },
{ X86::MOVZX32rr16, X86::MOVZX32rm16 },
{ X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8 },
{ X86::MOVZX32rr8, X86::MOVZX32rm8 },
{ X86::MOVZX64rr16, X86::MOVZX64rm16 },
{ X86::MOVZX64rr32, X86::MOVZX64rm32 },
{ X86::MOVZX64rr8, X86::MOVZX64rm8 },
{ X86::PSHUFDri, X86::PSHUFDmi },
{ X86::PSHUFHWri, X86::PSHUFHWmi },
{ X86::PSHUFLWri, X86::PSHUFLWmi },
{ X86::RCPPSr, X86::RCPPSm },
{ X86::RCPPSr_Int, X86::RCPPSm_Int },
{ X86::RSQRTPSr, X86::RSQRTPSm },
{ X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
{ X86::RSQRTSSr, X86::RSQRTSSm },
{ X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
{ X86::SQRTPDr, X86::SQRTPDm },
{ X86::SQRTPDr_Int, X86::SQRTPDm_Int },
{ X86::SQRTPSr, X86::SQRTPSm },
{ X86::SQRTPSr_Int, X86::SQRTPSm_Int },
{ X86::SQRTSDr, X86::SQRTSDm },
{ X86::SQRTSDr_Int, X86::SQRTSDm_Int },
{ X86::SQRTSSr, X86::SQRTSSm },
{ X86::SQRTSSr_Int, X86::SQRTSSm_Int },
{ X86::TEST16rr, X86::TEST16rm },
{ X86::TEST32rr, X86::TEST32rm },
{ X86::TEST64rr, X86::TEST64rm },
{ X86::TEST8rr, X86::TEST8rm },
// FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
{ X86::UCOMISDrr, X86::UCOMISDrm },
{ X86::UCOMISSrr, X86::UCOMISSrm }
};
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
unsigned RegOp = OpTbl1[i][0];
unsigned MemOp = OpTbl1[i][1];
if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
MemOp)).second)
assert(false && "Duplicated entries?");
unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
static const unsigned OpTbl2[][2] = {
{ X86::ADC32rr, X86::ADC32rm },
{ X86::ADC64rr, X86::ADC64rm },
{ X86::ADD16rr, X86::ADD16rm },
{ X86::ADD32rr, X86::ADD32rm },
{ X86::ADD64rr, X86::ADD64rm },
{ X86::ADD8rr, X86::ADD8rm },
{ X86::ADDPDrr, X86::ADDPDrm },
{ X86::ADDPSrr, X86::ADDPSrm },
{ X86::ADDSDrr, X86::ADDSDrm },
{ X86::ADDSSrr, X86::ADDSSrm },
{ X86::ADDSUBPDrr, X86::ADDSUBPDrm },
{ X86::ADDSUBPSrr, X86::ADDSUBPSrm },
{ X86::AND16rr, X86::AND16rm },
{ X86::AND32rr, X86::AND32rm },
{ X86::AND64rr, X86::AND64rm },
{ X86::AND8rr, X86::AND8rm },
{ X86::ANDNPDrr, X86::ANDNPDrm },
{ X86::ANDNPSrr, X86::ANDNPSrm },
{ X86::ANDPDrr, X86::ANDPDrm },
{ X86::ANDPSrr, X86::ANDPSrm },
{ X86::CMOVA16rr, X86::CMOVA16rm },
{ X86::CMOVA32rr, X86::CMOVA32rm },
{ X86::CMOVA64rr, X86::CMOVA64rm },
{ X86::CMOVAE16rr, X86::CMOVAE16rm },
{ X86::CMOVAE32rr, X86::CMOVAE32rm },
{ X86::CMOVAE64rr, X86::CMOVAE64rm },
{ X86::CMOVB16rr, X86::CMOVB16rm },
{ X86::CMOVB32rr, X86::CMOVB32rm },
{ X86::CMOVB64rr, X86::CMOVB64rm },
{ X86::CMOVBE16rr, X86::CMOVBE16rm },
{ X86::CMOVBE32rr, X86::CMOVBE32rm },
{ X86::CMOVBE64rr, X86::CMOVBE64rm },
{ X86::CMOVE16rr, X86::CMOVE16rm },
{ X86::CMOVE32rr, X86::CMOVE32rm },
{ X86::CMOVE64rr, X86::CMOVE64rm },
{ X86::CMOVG16rr, X86::CMOVG16rm },
{ X86::CMOVG32rr, X86::CMOVG32rm },
{ X86::CMOVG64rr, X86::CMOVG64rm },
{ X86::CMOVGE16rr, X86::CMOVGE16rm },
{ X86::CMOVGE32rr, X86::CMOVGE32rm },
{ X86::CMOVGE64rr, X86::CMOVGE64rm },
{ X86::CMOVL16rr, X86::CMOVL16rm },
{ X86::CMOVL32rr, X86::CMOVL32rm },
{ X86::CMOVL64rr, X86::CMOVL64rm },
{ X86::CMOVLE16rr, X86::CMOVLE16rm },
{ X86::CMOVLE32rr, X86::CMOVLE32rm },
{ X86::CMOVLE64rr, X86::CMOVLE64rm },
{ X86::CMOVNE16rr, X86::CMOVNE16rm },
{ X86::CMOVNE32rr, X86::CMOVNE32rm },
{ X86::CMOVNE64rr, X86::CMOVNE64rm },
{ X86::CMOVNO16rr, X86::CMOVNO16rm },
{ X86::CMOVNO32rr, X86::CMOVNO32rm },
{ X86::CMOVNO64rr, X86::CMOVNO64rm },
{ X86::CMOVNP16rr, X86::CMOVNP16rm },
{ X86::CMOVNP32rr, X86::CMOVNP32rm },
{ X86::CMOVNP64rr, X86::CMOVNP64rm },
{ X86::CMOVNS16rr, X86::CMOVNS16rm },
{ X86::CMOVNS32rr, X86::CMOVNS32rm },
{ X86::CMOVNS64rr, X86::CMOVNS64rm },
{ X86::CMOVO16rr, X86::CMOVO16rm },
{ X86::CMOVO32rr, X86::CMOVO32rm },
{ X86::CMOVO64rr, X86::CMOVO64rm },
{ X86::CMOVP16rr, X86::CMOVP16rm },
{ X86::CMOVP32rr, X86::CMOVP32rm },
{ X86::CMOVP64rr, X86::CMOVP64rm },
{ X86::CMOVS16rr, X86::CMOVS16rm },
{ X86::CMOVS32rr, X86::CMOVS32rm },
{ X86::CMOVS64rr, X86::CMOVS64rm },
{ X86::CMPPDrri, X86::CMPPDrmi },
{ X86::CMPPSrri, X86::CMPPSrmi },
{ X86::CMPSDrr, X86::CMPSDrm },
{ X86::CMPSSrr, X86::CMPSSrm },
{ X86::DIVPDrr, X86::DIVPDrm },
{ X86::DIVPSrr, X86::DIVPSrm },
{ X86::DIVSDrr, X86::DIVSDrm },
{ X86::DIVSSrr, X86::DIVSSrm },
{ X86::FsANDNPDrr, X86::FsANDNPDrm },
{ X86::FsANDNPSrr, X86::FsANDNPSrm },
{ X86::FsANDPDrr, X86::FsANDPDrm },
{ X86::FsANDPSrr, X86::FsANDPSrm },
{ X86::FsORPDrr, X86::FsORPDrm },
{ X86::FsORPSrr, X86::FsORPSrm },
{ X86::FsXORPDrr, X86::FsXORPDrm },
{ X86::FsXORPSrr, X86::FsXORPSrm },
{ X86::HADDPDrr, X86::HADDPDrm },
{ X86::HADDPSrr, X86::HADDPSrm },
{ X86::HSUBPDrr, X86::HSUBPDrm },
{ X86::HSUBPSrr, X86::HSUBPSrm },
{ X86::IMUL16rr, X86::IMUL16rm },
{ X86::IMUL32rr, X86::IMUL32rm },
{ X86::IMUL64rr, X86::IMUL64rm },
{ X86::MAXPDrr, X86::MAXPDrm },
{ X86::MAXPDrr_Int, X86::MAXPDrm_Int },
{ X86::MAXPSrr, X86::MAXPSrm },
{ X86::MAXPSrr_Int, X86::MAXPSrm_Int },
{ X86::MAXSDrr, X86::MAXSDrm },
{ X86::MAXSDrr_Int, X86::MAXSDrm_Int },
{ X86::MAXSSrr, X86::MAXSSrm },
{ X86::MAXSSrr_Int, X86::MAXSSrm_Int },
{ X86::MINPDrr, X86::MINPDrm },
{ X86::MINPDrr_Int, X86::MINPDrm_Int },
{ X86::MINPSrr, X86::MINPSrm },
{ X86::MINPSrr_Int, X86::MINPSrm_Int },
{ X86::MINSDrr, X86::MINSDrm },
{ X86::MINSDrr_Int, X86::MINSDrm_Int },
{ X86::MINSSrr, X86::MINSSrm },
{ X86::MINSSrr_Int, X86::MINSSrm_Int },
{ X86::MULPDrr, X86::MULPDrm },
{ X86::MULPSrr, X86::MULPSrm },
{ X86::MULSDrr, X86::MULSDrm },
{ X86::MULSSrr, X86::MULSSrm },
{ X86::OR16rr, X86::OR16rm },
{ X86::OR32rr, X86::OR32rm },
{ X86::OR64rr, X86::OR64rm },
{ X86::OR8rr, X86::OR8rm },
{ X86::ORPDrr, X86::ORPDrm },
{ X86::ORPSrr, X86::ORPSrm },
{ X86::PACKSSDWrr, X86::PACKSSDWrm },
{ X86::PACKSSWBrr, X86::PACKSSWBrm },
{ X86::PACKUSWBrr, X86::PACKUSWBrm },
{ X86::PADDBrr, X86::PADDBrm },
{ X86::PADDDrr, X86::PADDDrm },
{ X86::PADDQrr, X86::PADDQrm },
{ X86::PADDSBrr, X86::PADDSBrm },
{ X86::PADDSWrr, X86::PADDSWrm },
{ X86::PADDWrr, X86::PADDWrm },
{ X86::PANDNrr, X86::PANDNrm },
{ X86::PANDrr, X86::PANDrm },
{ X86::PAVGBrr, X86::PAVGBrm },
{ X86::PAVGWrr, X86::PAVGWrm },
{ X86::PCMPEQBrr, X86::PCMPEQBrm },
{ X86::PCMPEQDrr, X86::PCMPEQDrm },
{ X86::PCMPEQWrr, X86::PCMPEQWrm },
{ X86::PCMPGTBrr, X86::PCMPGTBrm },
{ X86::PCMPGTDrr, X86::PCMPGTDrm },
{ X86::PCMPGTWrr, X86::PCMPGTWrm },
{ X86::PINSRWrri, X86::PINSRWrmi },
{ X86::PMADDWDrr, X86::PMADDWDrm },
{ X86::PMAXSWrr, X86::PMAXSWrm },
{ X86::PMAXUBrr, X86::PMAXUBrm },
{ X86::PMINSWrr, X86::PMINSWrm },
{ X86::PMINUBrr, X86::PMINUBrm },
{ X86::PMULDQrr, X86::PMULDQrm },
{ X86::PMULHUWrr, X86::PMULHUWrm },
{ X86::PMULHWrr, X86::PMULHWrm },
{ X86::PMULLDrr, X86::PMULLDrm },
{ X86::PMULLDrr_int, X86::PMULLDrm_int },
{ X86::PMULLWrr, X86::PMULLWrm },
{ X86::PMULUDQrr, X86::PMULUDQrm },
{ X86::PORrr, X86::PORrm },
{ X86::PSADBWrr, X86::PSADBWrm },
{ X86::PSLLDrr, X86::PSLLDrm },
{ X86::PSLLQrr, X86::PSLLQrm },
{ X86::PSLLWrr, X86::PSLLWrm },
{ X86::PSRADrr, X86::PSRADrm },
{ X86::PSRAWrr, X86::PSRAWrm },
{ X86::PSRLDrr, X86::PSRLDrm },
{ X86::PSRLQrr, X86::PSRLQrm },
{ X86::PSRLWrr, X86::PSRLWrm },
{ X86::PSUBBrr, X86::PSUBBrm },
{ X86::PSUBDrr, X86::PSUBDrm },
{ X86::PSUBSBrr, X86::PSUBSBrm },
{ X86::PSUBSWrr, X86::PSUBSWrm },
{ X86::PSUBWrr, X86::PSUBWrm },
{ X86::PUNPCKHBWrr, X86::PUNPCKHBWrm },
{ X86::PUNPCKHDQrr, X86::PUNPCKHDQrm },
{ X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
{ X86::PUNPCKHWDrr, X86::PUNPCKHWDrm },
{ X86::PUNPCKLBWrr, X86::PUNPCKLBWrm },
{ X86::PUNPCKLDQrr, X86::PUNPCKLDQrm },
{ X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
{ X86::PUNPCKLWDrr, X86::PUNPCKLWDrm },
{ X86::PXORrr, X86::PXORrm },
{ X86::SBB32rr, X86::SBB32rm },
{ X86::SBB64rr, X86::SBB64rm },
{ X86::SHUFPDrri, X86::SHUFPDrmi },
{ X86::SHUFPSrri, X86::SHUFPSrmi },
{ X86::SUB16rr, X86::SUB16rm },
{ X86::SUB32rr, X86::SUB32rm },
{ X86::SUB64rr, X86::SUB64rm },
{ X86::SUB8rr, X86::SUB8rm },
{ X86::SUBPDrr, X86::SUBPDrm },
{ X86::SUBPSrr, X86::SUBPSrm },
{ X86::SUBSDrr, X86::SUBSDrm },
{ X86::SUBSSrr, X86::SUBSSrm },
// FIXME: TEST*rr -> swapped operand of TEST*mr.
{ X86::UNPCKHPDrr, X86::UNPCKHPDrm },
{ X86::UNPCKHPSrr, X86::UNPCKHPSrm },
{ X86::UNPCKLPDrr, X86::UNPCKLPDrm },
{ X86::UNPCKLPSrr, X86::UNPCKLPSrm },
{ X86::XOR16rr, X86::XOR16rm },
{ X86::XOR32rr, X86::XOR32rm },
{ X86::XOR64rr, X86::XOR64rm },
{ X86::XOR8rr, X86::XOR8rm },
{ X86::XORPDrr, X86::XORPDrm },
{ X86::XORPSrr, X86::XORPSrm }
};
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
unsigned RegOp = OpTbl2[i][0];
unsigned MemOp = OpTbl2[i][1];
if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
MemOp)).second)
assert(false && "Duplicated entries?");
unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
// Remove ambiguous entries.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
switch (MI.getOpcode()) {
default:
return false;
case X86::MOV8rr:
case X86::MOV8rr_NOREX:
case X86::MOV16rr:
case X86::MOV32rr:
case X86::MOV64rr:
case X86::MOVSSrr:
case X86::MOVSDrr:
// FP Stack register class copies
case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
case X86::MOV_Fp3264: case X86::MOV_Fp3280:
case X86::MOV_Fp6432: case X86::MOV_Fp8032:
case X86::FsMOVAPSrr:
case X86::FsMOVAPDrr:
case X86::MOVAPSrr:
case X86::MOVAPDrr:
case X86::MOVSS2PSrr:
case X86::MOVSD2PDrr:
case X86::MOVPS2SSrr:
case X86::MOVPD2SDrr:
case X86::MMX_MOVQ64rr:
assert(MI.getNumOperands() >= 2 &&
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
"invalid register-register move instruction");
Evan Cheng
committed
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
SrcSubIdx = MI.getOperand(1).getSubReg();
    DstSubIdx = MI.getOperand(0).getSubReg();
    return true;
}
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVAPDrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
MI->getOperand(2).getImm() == 1 &&
MI->getOperand(3).getReg() == 0 &&
MI->getOperand(4).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
break;
}
return 0;
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8mr:
case X86::MOV16mr:
case X86::MOV32mr:
case X86::MOVSSmr:
case X86::MOVSDmr:
case X86::MOVAPSmr:
case X86::MOVAPDmr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
return MI->getOperand(X86AddrNumOperands).getReg();
}
break;
}
return 0;
}
/// regIsPICBase - Return true if register is PIC base, i.e. defined by
/// X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
bool isPICBase = false;
for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
E = MRI.def_end(); I != E; ++I) {
MachineInstr *DefMI = I.getOperand().getParent();
if (DefMI->getOpcode() != X86::MOVPC32r)
return false;
assert(!isPICBase && "More than one PIC base?");
isPICBase = true;
}
return isPICBase;
}
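// regIsPICBase is used by the rematerialization logic below: a PIC-relative
// load or LEA is only treated as trivially rematerializable when its base
// register has no definition other than X86::MOVPC32r.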
/// CanRematLoadWithDispOperand - Return true if a load with the specified
/// operand is a candidate for remat: for this to be true we need to know that
/// the load will always return the same value, even if moved.
static bool CanRematLoadWithDispOperand(const MachineOperand &MO,
X86TargetMachine &TM) {
// Loads from constant pool entries can be remat'd.
if (MO.isCPI()) return true;
// We can remat globals in some cases.
if (MO.isGlobal()) {
// If this is a load of a stub, not of the global, we can remat it. This
// access will always return the address of the global.
if (isGlobalStubReference(MO.getTargetFlags()))
return true;
// If the global itself is constant, we can remat the load.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(MO.getGlobal()))
if (GV->isConstant())
return true;
}
return false;
}
bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVAPDrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm: {
// Loads from constant pools are trivially rematerializable.
if (MI->getOperand(1).isReg() &&
MI->getOperand(2).isImm() &&
MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
CanRematLoadWithDispOperand(MI->getOperand(4), TM)) {
unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
return false;
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
bool isPICBase = false;
for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
E = MRI.def_end(); I != E; ++I) {
MachineInstr *DefMI = I.getOperand().getParent();
if (DefMI->getOpcode() != X86::MOVPC32r)
return false;
assert(!isPICBase && "More than one PIC base?");
isPICBase = true;
}
return isPICBase;
}
return false;
}
case X86::LEA32r:
case X86::LEA64r: {
if (MI->getOperand(2).isImm() &&
MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
!MI->getOperand(4).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
return false;
}
}
// All other instructions marked M_REMATERIALIZABLE are always trivially
// rematerializable.
return true;
}
/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative. If it cannot definitely determine the safety after visiting
/// two instructions, it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
// It's always safe to clobber EFLAGS at the end of a block.
if (I == MBB.end())
return true;
// For compile time consideration, if we are not able to determine the
// safety after visiting 2 instructions, we will assume it's not safe.
for (unsigned i = 0; i < 2; ++i) {
bool SeenDef = false;
for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
MachineOperand &MO = I->getOperand(j);
if (!MO.isReg())
continue;
if (MO.getReg() == X86::EFLAGS) {
if (MO.isUse())
return false;
SeenDef = true;
}
}
if (SeenDef)
// This instruction defines EFLAGS, no need to look any further.
return true;
++I;
// If we make it to the end of the block, it's safe to clobber EFLAGS.
if (I == MBB.end())
return true;
}
// Conservative answer.
return false;
}
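// reMaterialize below relies on this check: the zero pseudo-instructions
// (MOV8r0/MOV16r0/MOV32r0/MOV64r0) are implemented with XOR, which clobbers
// EFLAGS, so when it is not safe to clobber EFLAGS at the insertion point
// they are re-emitted as the corresponding MOV*ri with immediate 0 instead.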
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
unsigned SubIdx = Orig->getOperand(0).isReg()
? Orig->getOperand(0).getSubReg() : 0;
bool ChangeSubIdx = SubIdx != 0;
if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
DestReg = RI.getSubReg(DestReg, SubIdx);
SubIdx = 0;
}
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// Re-materialize them as movri instructions to avoid side effects.
bool Emitted = false;
switch (Orig->getOpcode()) {
default: break;
case X86::MOV8r0:
case X86::MOV16r0:
case X86::MOV32r0:
case X86::MOV64r0: {
if (!isSafeToClobberEFLAGS(MBB, I)) {
unsigned Opc = 0;
switch (Orig->getOpcode()) {
default: break;
case X86::MOV8r0: Opc = X86::MOV8ri; break;
case X86::MOV16r0: Opc = X86::MOV16ri; break;
case X86::MOV32r0: Opc = X86::MOV32ri; break;
case X86::MOV64r0: Opc = X86::MOV64ri32; break;
}
BuildMI(MBB, I, DL, get(Opc), DestReg).addImm(0);
Emitted = true;
}
}
}
if (!Emitted) {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
}
if (ChangeSubIdx) {
MachineInstr *NewMI = prior(I);
NewMI->getOperand(0).setSubReg(SubIdx);
  }
}

/// isInvariantLoad - Return true if the specified instruction (which is marked
/// mayLoad) is loading from a location whose value is invariant across the
/// function. For example, loading a value from the constant pool or from
/// the argument area of a function if it does not change. This should
/// only return true if *all* loads the instruction does are invariant (if it
/// does multiple loads).
bool X86InstrInfo::isInvariantLoad(const MachineInstr *MI) const {
// This code cares about loads from three cases: constant pool entries,
// invariant argument slots, and global stubs. In order to handle these cases
  // for the myriad of X86 instructions, we just scan for a CP/FI/GV
// operand and base our analysis on it. This is safe because the address of
// none of these three cases is ever used as anything other than a load base
// and X86 doesn't have any instructions that load from multiple places.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
// Loads from constant pools are trivially invariant.
    if (MO.isCPI())
      return true;

    // A load of a global stub is invariant: the stub always holds the
    // address of the global, which does not change.
    if (MO.isGlobal())
      return isGlobalStubReference(MO.getTargetFlags());
// If this is a load from an invariant stack slot, the load is a constant.
if (MO.isFI()) {
const MachineFrameInfo &MFI =
*MI->getParent()->getParent()->getFrameInfo();
int Idx = MO.getIndex();
return MFI.isFixedObjectIndex(Idx) && MFI.isImmutableObjectIndex(Idx);
}
  }

  // All other instances of these instructions are presumed to have other
  // issues.
  return false;
}
/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.