Bug Summary

File: lib/Target/ARM/ARMConstantIslandPass.cpp
Warning: line 31, column 35
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int'
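In plain terms: on the reported path a 32-bit unsigned value is left-shifted by 32 bits, which the C++ standard leaves undefined. A minimal sketch of the pattern and a guarded alternative follows; the function and names are illustrative only, not the code under analysis:

    // Illustrative only -- not the LLVM code itself.
    // Shifting a 32-bit 'unsigned' by 32 or more bits is undefined behavior.
    unsigned lowBitMask(unsigned Bits) {
      // return (1u << Bits) - 1;                    // UB when Bits >= 32
      return Bits >= 32 ? ~0u : (1u << Bits) - 1u;   // guarded version
    }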

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ARMConstantIslandPass.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM -I /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn325874/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/lib/Target/ARM -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-23-163436-368-1 -x c++ /build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM/ARMConstantIslandPass.cpp

/build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM/ARMConstantIslandPass.cpp

1//===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains a pass that splits the constant pool up into 'islands'
11// which are scattered throughout the function. This is required due to the
12// limited pc-relative displacements that ARM has.
13//
14//===----------------------------------------------------------------------===//
15
16#include "ARM.h"
17#include "ARMBaseInstrInfo.h"
18#include "ARMBasicBlockInfo.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMSubtarget.h"
21#include "MCTargetDesc/ARMBaseInfo.h"
22#include "Thumb2InstrInfo.h"
23#include "Utils/ARMBaseInfo.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/Statistic.h"
29#include "llvm/ADT/StringRef.h"
30#include "llvm/CodeGen/MachineBasicBlock.h"
31#include "llvm/CodeGen/MachineConstantPool.h"
32#include "llvm/CodeGen/MachineFunction.h"
33#include "llvm/CodeGen/MachineFunctionPass.h"
34#include "llvm/CodeGen/MachineInstr.h"
35#include "llvm/CodeGen/MachineJumpTableInfo.h"
36#include "llvm/CodeGen/MachineOperand.h"
37#include "llvm/CodeGen/MachineRegisterInfo.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/DebugLoc.h"
40#include "llvm/MC/MCInstrDesc.h"
41#include "llvm/Pass.h"
42#include "llvm/Support/CommandLine.h"
43#include "llvm/Support/Compiler.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/Format.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/raw_ostream.h"
49#include <algorithm>
50#include <cassert>
51#include <cstdint>
52#include <iterator>
53#include <utility>
54#include <vector>
55
56using namespace llvm;
57
58#define DEBUG_TYPE "arm-cp-islands"
59
60#define ARM_CP_ISLANDS_OPT_NAME \
61 "ARM constant island placement and branch shortening pass"
62STATISTIC(NumCPEs,       "Number of constpool entries");
63STATISTIC(NumSplit,      "Number of uncond branches inserted");
64STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
65STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
66STATISTIC(NumTBs,        "Number of table branches generated");
67STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
68STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
69STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
70STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
71STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
72
73static cl::opt<bool>
74AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
75 cl::desc("Adjust basic block layout to better use TB[BH]"));
76
77static cl::opt<unsigned>
78CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
79 cl::desc("The max number of iterations for convergence"));
80
81static cl::opt<bool> SynthesizeThumb1TBB(
82 "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
83 cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
84 "equivalent to the TBB/TBH instructions"));
85
86namespace {
87
88 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
89 /// requires constant pool entries to be scattered among the instructions
90 /// inside a function. To do this, it completely ignores the normal LLVM
91 /// constant pool; instead, it places constants wherever it feels like with
92 /// special instructions.
93 ///
94 /// The terminology used in this pass includes:
95 /// Islands - Clumps of constants placed in the function.
96 /// Water - Potential places where an island could be formed.
97 /// CPE - A constant pool entry that has been placed somewhere, which
98 /// tracks a list of users.
99 class ARMConstantIslands : public MachineFunctionPass {
100 std::vector<BasicBlockInfo> BBInfo;
101
102 /// WaterList - A sorted list of basic blocks where islands could be placed
103 /// (i.e. blocks that don't fall through to the following block, due
104 /// to a return, unreachable, or unconditional branch).
105 std::vector<MachineBasicBlock*> WaterList;
106
107 /// NewWaterList - The subset of WaterList that was created since the
108 /// previous iteration by inserting unconditional branches.
109 SmallSet<MachineBasicBlock*, 4> NewWaterList;
110
111 using water_iterator = std::vector<MachineBasicBlock *>::iterator;
112
113 /// CPUser - One user of a constant pool, keeping the machine instruction
114 /// pointer, the constant pool being referenced, and the max displacement
115 /// allowed from the instruction to the CP. The HighWaterMark records the
116 /// highest basic block where a new CPEntry can be placed. To ensure this
117 /// pass terminates, the CP entries are initially placed at the end of the
118 /// function and then move monotonically to lower addresses. The
119 /// exception to this rule is when the current CP entry for a particular
120 /// CPUser is out of range, but there is another CP entry for the same
121 /// constant value in range. We want to use the existing in-range CP
122 /// entry, but if it later moves out of range, the search for new water
123 /// should resume where it left off. The HighWaterMark is used to record
124 /// that point.
125 struct CPUser {
126 MachineInstr *MI;
127 MachineInstr *CPEMI;
128 MachineBasicBlock *HighWaterMark;
129 unsigned MaxDisp;
130 bool NegOk;
131 bool IsSoImm;
132 bool KnownAlignment = false;
133
134 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
135 bool neg, bool soimm)
136 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
137 HighWaterMark = CPEMI->getParent();
138 }
139
140 /// getMaxDisp - Returns the maximum displacement supported by MI.
141 /// Correct for unknown alignment.
142 /// Conservatively subtract 2 bytes to handle weird alignment effects.
143 unsigned getMaxDisp() const {
144 return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
145 }
146 };
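As a worked example of the safety adjustment in getMaxDisp() (illustrative numbers): a CPUser constructed with MaxDisp = 4096 reports 4096 - 2 = 4094 bytes when KnownAlignment is true, and (4096 - 2) - 2 = 4092 bytes when the alignment is unknown.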
147
148 /// CPUsers - Keep track of all of the machine instructions that use various
149 /// constant pools and their max displacement.
150 std::vector<CPUser> CPUsers;
151
152 /// CPEntry - One per constant pool entry, keeping the machine instruction
153 /// pointer, the constpool index, and the number of CPUser's which
154 /// reference this entry.
155 struct CPEntry {
156 MachineInstr *CPEMI;
157 unsigned CPI;
158 unsigned RefCount;
159
160 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
161 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
162 };
163
164 /// CPEntries - Keep track of all of the constant pool entry machine
165 /// instructions. For each original constpool index (i.e. those that existed
166 /// upon entry to this pass), it keeps a vector of entries. Original
167 /// elements are cloned as we go along; the clones are put in the vector of
168 /// the original element, but have distinct CPIs.
169 ///
170 /// The first half of CPEntries contains generic constants, the second half
171 /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
172 /// which vector it will be in here.
173 std::vector<std::vector<CPEntry>> CPEntries;
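For example (illustrative): in a function with three original constants and two jump tables, the constants occupy CPEntries[0..2] and the jump tables CPEntries[3..4]; JumpTableEntryIndices records the mapping from each JT index to its slot in the second half.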
174
175 /// Maps a JT index to the offset in CPEntries containing copies of that
176 /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
177 DenseMap<int, int> JumpTableEntryIndices;
178
179 /// Maps a JT index to the LEA that actually uses the index to calculate its
180 /// base address.
181 DenseMap<int, int> JumpTableUserIndices;
182
183 /// ImmBranch - One per immediate branch, keeping the machine instruction
184 /// pointer, conditional or unconditional, the max displacement,
185 /// and (if isCond is true) the corresponding unconditional branch
186 /// opcode.
187 struct ImmBranch {
188 MachineInstr *MI;
189 unsigned MaxDisp : 31;
190 bool isCond : 1;
191 unsigned UncondBr;
192
193 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
194 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
195 };
196
197 /// ImmBranches - Keep track of all the immediate branch instructions.
198 std::vector<ImmBranch> ImmBranches;
199
200 /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
201 SmallVector<MachineInstr*, 4> PushPopMIs;
202
203 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
204 SmallVector<MachineInstr*, 4> T2JumpTables;
205
206 /// HasFarJump - True if any far jump instruction has been emitted during
207 /// the branch fix up pass.
208 bool HasFarJump;
209
210 MachineFunction *MF;
211 MachineConstantPool *MCP;
212 const ARMBaseInstrInfo *TII;
213 const ARMSubtarget *STI;
214 ARMFunctionInfo *AFI;
215 bool isThumb;
216 bool isThumb1;
217 bool isThumb2;
218 bool isPositionIndependentOrROPI;
219
220 public:
221 static char ID;
222
223 ARMConstantIslands() : MachineFunctionPass(ID) {}
224
225 bool runOnMachineFunction(MachineFunction &MF) override;
226
227 MachineFunctionProperties getRequiredProperties() const override {
228 return MachineFunctionProperties().set(
229 MachineFunctionProperties::Property::NoVRegs);
230 }
231
232 StringRef getPassName() const override {
233 return ARM_CP_ISLANDS_OPT_NAME;
234 }
235
236 private:
237 void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
238 void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
239 bool BBHasFallthrough(MachineBasicBlock *MBB);
240 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
241 unsigned getCPELogAlign(const MachineInstr *CPEMI);
242 void scanFunctionJumpTables();
243 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
244 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
245 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
246 void adjustBBOffsetsAfter(MachineBasicBlock *BB);
247 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
248 unsigned getCombinedIndex(const MachineInstr *CPEMI);
249 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
250 bool findAvailableWater(CPUser&U, unsigned UserOffset,
251 water_iterator &WaterIter, bool CloserWater);
252 void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
253 MachineBasicBlock *&NewMBB);
254 bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
255 void removeDeadCPEMI(MachineInstr *CPEMI);
256 bool removeUnusedCPEntries();
257 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
258 MachineInstr *CPEMI, unsigned Disp, bool NegOk,
259 bool DoDump = false);
260 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
261 CPUser &U, unsigned &Growth);
262 bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
263 bool fixupImmediateBr(ImmBranch &Br);
264 bool fixupConditionalBr(ImmBranch &Br);
265 bool fixupUnconditionalBr(ImmBranch &Br);
266 bool undoLRSpillRestore();
267 bool optimizeThumb2Instructions();
268 bool optimizeThumb2Branches();
269 bool reorderThumb2JumpTables();
270 bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
271 unsigned &DeadSize, bool &CanDeleteLEA,
272 bool &BaseRegKill);
273 bool optimizeThumb2JumpTables();
274 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
275 MachineBasicBlock *JTBB);
276
277 unsigned getOffsetOf(MachineInstr *MI) const;
278 unsigned getUserOffset(CPUser&) const;
279 void dumpBBs();
280 void verify();
281
282 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
283 unsigned Disp, bool NegativeOK, bool IsSoImm = false);
284 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
285 const CPUser &U) {
286 return isOffsetInRange(UserOffset, TrialOffset,
287 U.getMaxDisp(), U.NegOk, U.IsSoImm);
288 }
289 };
290
291} // end anonymous namespace
292
293char ARMConstantIslands::ID = 0;
294
295/// verify - check BBOffsets, BBSizes, alignment of islands
296void ARMConstantIslands::verify() {
297#ifndef NDEBUG
298 assert(std::is_sorted(MF->begin(), MF->end(),

 1. Within the expansion of the macro 'assert':
    a. Calling 'BasicBlockInfo::postOffset'

299 [this](const MachineBasicBlock &LHS,
300 const MachineBasicBlock &RHS) {
301 return BBInfo[LHS.getNumber()].postOffset() <
302 BBInfo[RHS.getNumber()].postOffset();
303 }));
304 DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
305 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
306 CPUser &U = CPUsers[i];
307 unsigned UserOffset = getUserOffset(U);
308 // Verify offset using the real max displacement without the safety
309 // adjustment.
310 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
311 /* DoDump = */ true)) {
312 DEBUG(dbgs() << "OK\n");
313 continue;
314 }
315 DEBUG(dbgs() << "Out of range.\n");
316 dumpBBs();
317 DEBUG(MF->dump());
318 llvm_unreachable("Constant pool entry out of range!");
319 }
320#endif
321}
322
323#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
324/// print block size and offset information - debugging
325LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
326 DEBUG({
327 for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
328 const BasicBlockInfo &BBI = BBInfo[J];
329 dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
330 << " kb=" << unsigned(BBI.KnownBits)
331 << " ua=" << unsigned(BBI.Unalign)
332 << " pa=" << unsigned(BBI.PostAlign)
333 << format(" size=%#x\n", BBInfo[J].Size);
334 }
335 });
336}
337#endif
338
339bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
340 MF = &mf;
341 MCP = mf.getConstantPool();
342
343 DEBUG(dbgs() << "***** ARMConstantIslands: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "***** ARMConstantIslands: "
<< MCP->getConstants().size() << " CP entries, aligned to "
<< MCP->getConstantPoolAlignment() << " bytes *****\n"
; } } while (false)
344 << MCP->getConstants().size() << " CP entries, aligned to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "***** ARMConstantIslands: "
<< MCP->getConstants().size() << " CP entries, aligned to "
<< MCP->getConstantPoolAlignment() << " bytes *****\n"
; } } while (false)
345 << MCP->getConstantPoolAlignment() << " bytes *****\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "***** ARMConstantIslands: "
<< MCP->getConstants().size() << " CP entries, aligned to "
<< MCP->getConstantPoolAlignment() << " bytes *****\n"
; } } while (false)
;
346
347 STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
348 TII = STI->getInstrInfo();
349 isPositionIndependentOrROPI =
350 STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
351 AFI = MF->getInfo<ARMFunctionInfo>();
352
353 isThumb = AFI->isThumbFunction();
354 isThumb1 = AFI->isThumb1OnlyFunction();
355 isThumb2 = AFI->isThumb2Function();
356
357 HasFarJump = false;
358 bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
359
360 // This pass invalidates liveness information when it splits basic blocks.
361 MF->getRegInfo().invalidateLiveness();
362
363 // Renumber all of the machine basic blocks in the function, guaranteeing that
364 // the numbers agree with the position of the block in the function.
365 MF->RenumberBlocks();
366
367 // Try to reorder and otherwise adjust the block layout to make good use
368 // of the TB[BH] instructions.
369 bool MadeChange = false;
370 if (GenerateTBB && AdjustJumpTableBlocks) {
371 scanFunctionJumpTables();
372 MadeChange |= reorderThumb2JumpTables();
373 // Data is out of date, so clear it. It'll be re-computed later.
374 T2JumpTables.clear();
375 // Blocks may have shifted around. Keep the numbering up to date.
376 MF->RenumberBlocks();
377 }
378
379 // Perform the initial placement of the constant pool entries. To start with,
380 // we put them all at the end of the function.
381 std::vector<MachineInstr*> CPEMIs;
382 if (!MCP->isEmpty())
383 doInitialConstPlacement(CPEMIs);
384
385 if (MF->getJumpTableInfo())
386 doInitialJumpTablePlacement(CPEMIs);
387
388 /// The next UID to take is the first unused one.
389 AFI->initPICLabelUId(CPEMIs.size());
390
391 // Do the initial scan of the function, building up information about the
392 // sizes of each block, the location of all the water, and finding all of the
393 // constant pool users.
394 initializeFunctionInfo(CPEMIs);
395 CPEMIs.clear();
396 DEBUG(dumpBBs());
397
398 // Functions with jump tables need an alignment of 4 because they use the ADR
399 // instruction, which aligns the PC to 4 bytes before adding an offset.
400 if (!T2JumpTables.empty())
401 MF->ensureAlignment(2);
402
403 /// Remove dead constant pool entries.
404 MadeChange |= removeUnusedCPEntries();
405
406 // Iteratively place constant pool entries and fix up branches until there
407 // is no change.
408 unsigned NoCPIters = 0, NoBRIters = 0;
409 while (true) {
410 DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "Beginning CP iteration #"
<< NoCPIters << '\n'; } } while (false)
;
411 bool CPChange = false;
412 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
413 // For most inputs, it converges in no more than 5 iterations.
414 // If it doesn't converge within 10, the input may have a huge BB or many CPEs.
415 // In this case, we will try different heuristics.
416 CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
417 if (CPChange && ++NoCPIters > CPMaxIteration)
418 report_fatal_error("Constant Island pass failed to converge!");
419 DEBUG(dumpBBs());
420
421 // Clear NewWaterList now. If we split a block for branches, it should
422 // appear as "new water" for the next iteration of constant pool placement.
423 NewWaterList.clear();
424
425 DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "Beginning BR iteration #"
<< NoBRIters << '\n'; } } while (false)
;
426 bool BRChange = false;
427 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
428 BRChange |= fixupImmediateBr(ImmBranches[i]);
429 if (BRChange && ++NoBRIters > 30)
430 report_fatal_error("Branch Fix Up pass failed to converge!");
431 DEBUG(dumpBBs());
432
433 if (!CPChange && !BRChange)
434 break;
435 MadeChange = true;
436 }
437
438 // Shrink 32-bit Thumb2 load and store instructions.
439 if (isThumb2 && !STI->prefers32BitThumb())
440 MadeChange |= optimizeThumb2Instructions();
441
442 // Shrink 32-bit branch instructions.
443 if (isThumb && STI->hasV8MBaselineOps())
444 MadeChange |= optimizeThumb2Branches();
445
446 // Optimize jump tables using TBB / TBH.
447 if (GenerateTBB && !STI->genExecuteOnly())
448 MadeChange |= optimizeThumb2JumpTables();
449
450 // After a while, this might be made debug-only, but it is not expensive.
451 verify();
452
453 // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
454 // undo the spill / restore of LR if possible.
455 if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
456 MadeChange |= undoLRSpillRestore();
457
458 // Save the mapping between original and cloned constpool entries.
459 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
460 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
461 const CPEntry & CPE = CPEntries[i][j];
462 if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
463 AFI->recordCPEClone(i, CPE.CPI);
464 }
465 }
466
467 DEBUG(dbgs() << '\n'; dumpBBs());
468
469 BBInfo.clear();
470 WaterList.clear();
471 CPUsers.clear();
472 CPEntries.clear();
473 JumpTableEntryIndices.clear();
474 JumpTableUserIndices.clear();
475 ImmBranches.clear();
476 PushPopMIs.clear();
477 T2JumpTables.clear();
478
479 return MadeChange;
480}
481
482/// \brief Perform the initial placement of the regular constant pool entries.
483/// To start with, we put them all at the end of the function.
484void
485ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
486 // Create the basic block to hold the CPE's.
487 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
488 MF->push_back(BB);
489
490 // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
491 unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
492
493 // Mark the basic block as required by the const-pool.
494 BB->setAlignment(MaxAlign);
495
496 // The function needs to be as aligned as the basic blocks. The linker may
497 // move functions around based on their alignment.
498 MF->ensureAlignment(BB->getAlignment());
499
500 // Order the entries in BB by descending alignment. That ensures correct
501 // alignment of all entries as long as BB is sufficiently aligned. Keep
502 // track of the insertion point for each alignment. We are going to bucket
503 // sort the entries as they are created.
504 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
505
506 // Add all of the constants from the constant pool to the end block, use an
507 // identity mapping of CPI's to CPE's.
508 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
509
510 const DataLayout &TD = MF->getDataLayout();
511 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
512 unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
513 unsigned Align = CPs[i].getAlignment();
514 assert(isPowerOf2_32(Align) && "Invalid alignment");
515 // Verify that all constant pool entries are a multiple of their alignment.
516 // If not, we would have to pad them out so that instructions stay aligned.
517 assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");
518
519 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
520 unsigned LogAlign = Log2_32(Align);
521 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
522 MachineInstr *CPEMI =
523 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
524 .addImm(i).addConstantPoolIndex(i).addImm(Size);
525 CPEMIs.push_back(CPEMI);
526
527 // Ensure that future entries with higher alignment get inserted before
528 // CPEMI. This is bucket sort with iterators.
529 for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
530 if (InsPoint[a] == InsAt)
531 InsPoint[a] = CPEMI;
532
533 // Add a new CPEntry, but no corresponding CPUser yet.
534 CPEntries.emplace_back(1, CPEntry(CPEMI, i));
535 ++NumCPEs;
536 DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "Moved CPI#" << i
<< " to end of function, size = " << Size <<
", align = " << Align <<'\n'; } } while (false)
537 << Size << ", align = " << Align <<'\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("arm-cp-islands")) { dbgs() << "Moved CPI#" << i
<< " to end of function, size = " << Size <<
", align = " << Align <<'\n'; } } while (false)
;
538 }
539 DEBUG(BB->dump());
540}
541
542/// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH
543/// instructions can be made more efficient if the jump table immediately
544/// follows the instruction, it's best to place them immediately next to their
545/// jumps to begin with. In almost all cases they'll never be moved from that
546/// position.
547void ARMConstantIslands::doInitialJumpTablePlacement(
548 std::vector<MachineInstr *> &CPEMIs) {
549 unsigned i = CPEntries.size();
550 auto MJTI = MF->getJumpTableInfo();
551 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
552
553 MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
554 for (MachineBasicBlock &MBB : *MF) {
555 auto MI = MBB.getLastNonDebugInstr();
556 if (MI == MBB.end())
557 continue;
558
559 unsigned JTOpcode;
560 switch (MI->getOpcode()) {
561 default:
562 continue;
563 case ARM::BR_JTadd:
564 case ARM::BR_JTr:
565 case ARM::tBR_JTr:
566 case ARM::BR_JTm_i12:
567 case ARM::BR_JTm_rs:
568 JTOpcode = ARM::JUMPTABLE_ADDRS;
569 break;
570 case ARM::t2BR_JT:
571 JTOpcode = ARM::JUMPTABLE_INSTS;
572 break;
573 case ARM::tTBB_JT:
574 case ARM::t2TBB_JT:
575 JTOpcode = ARM::JUMPTABLE_TBB;
576 break;
577 case ARM::tTBH_JT:
578 case ARM::t2TBH_JT:
579 JTOpcode = ARM::JUMPTABLE_TBH;
580 break;
581 }
582
583 unsigned NumOps = MI->getDesc().getNumOperands();
584 MachineOperand JTOp =
585 MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
586 unsigned JTI = JTOp.getIndex();
587 unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
588 MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
589 MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
590 MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
591 DebugLoc(), TII->get(JTOpcode))
592 .addImm(i++)
593 .addJumpTableIndex(JTI)
594 .addImm(Size);
595 CPEMIs.push_back(CPEMI);
596 CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
597 JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
598 if (!LastCorrectlyNumberedBB)
599 LastCorrectlyNumberedBB = &MBB;
600 }
601
602 // If we did anything then we need to renumber the subsequent blocks.
603 if (LastCorrectlyNumberedBB)
604 MF->RenumberBlocks(LastCorrectlyNumberedBB);
605}
606
607/// BBHasFallthrough - Return true if the specified basic block can fallthrough
608/// into the block immediately after it.
609bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
610 // Get the next machine basic block in the function.
611 MachineFunction::iterator MBBI = MBB->getIterator();
612 // Can't fall off end of function.
613 if (std::next(MBBI) == MBB->getParent()->end())
614 return false;
615
616 MachineBasicBlock *NextBB = &*std::next(MBBI);
617 if (!MBB->isSuccessor(NextBB))
618 return false;
619
620 // Try to analyze the end of the block. A potential fallthrough may already
621 // have an unconditional branch for whatever reason.
622 MachineBasicBlock *TBB, *FBB;
623 SmallVector<MachineOperand, 4> Cond;
624 bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
625 return TooDifficult || FBB == nullptr;
626}
627
628/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
629/// look up the corresponding CPEntry.
630ARMConstantIslands::CPEntry *
631ARMConstantIslands::findConstPoolEntry(unsigned CPI,
632 const MachineInstr *CPEMI) {
633 std::vector<CPEntry> &CPEs = CPEntries[CPI];
634 // Number of entries per constpool index should be small, just do a
635 // linear search.
636 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
637 if (CPEs[i].CPEMI == CPEMI)
638 return &CPEs[i];
639 }
640 return nullptr;
641}
642
643/// getCPELogAlign - Returns the required alignment of the constant pool entry
644/// represented by CPEMI. Alignment is measured in log2(bytes) units.
645unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
646 switch (CPEMI->getOpcode()) {
647 case ARM::CONSTPOOL_ENTRY:
648 break;
649 case ARM::JUMPTABLE_TBB:
650 return isThumb1 ? 2 : 0;
651 case ARM::JUMPTABLE_TBH:
652 return isThumb1 ? 2 : 1;
653 case ARM::JUMPTABLE_INSTS:
654 return 1;
655 case ARM::JUMPTABLE_ADDRS:
656 return 2;
657 default:
658 llvm_unreachable("unknown constpool entry kind")::llvm::llvm_unreachable_internal("unknown constpool entry kind"
, "/build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM/ARMConstantIslandPass.cpp"
, 658)
;
659 }
660
661 unsigned CPI = getCombinedIndex(CPEMI);
662 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
663 unsigned Align = MCP->getConstants()[CPI].getAlignment();
664 assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
665 return Log2_32(Align);
666}
667
668/// scanFunctionJumpTables - Do a scan of the function, building up
669/// information about the sizes of each block and the locations of all
670/// the jump tables.
671void ARMConstantIslands::scanFunctionJumpTables() {
672 for (MachineBasicBlock &MBB : *MF) {
673 for (MachineInstr &I : MBB)
674 if (I.isBranch() &&
675 (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
676 T2JumpTables.push_back(&I);
677 }
678}
679
680/// initializeFunctionInfo - Do the initial scan of the function, building up
681/// information about the sizes of each block, the location of all the water,
682/// and finding all of the constant pool users.
683void ARMConstantIslands::
684initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
685
686 BBInfo = computeAllBlockSizes(MF);
687
688 // The known bits of the entry block offset are determined by the function
689 // alignment.
690 BBInfo.front().KnownBits = MF->getAlignment();
691
692 // Compute block offsets and known bits.
693 adjustBBOffsetsAfter(&MF->front());
694
695 // Now go back through the instructions and build up our data structures.
696 for (MachineBasicBlock &MBB : *MF) {
697 // If this block doesn't fall through into the next MBB, then this is
698 // 'water' where a constant pool island could be placed.
699 if (!BBHasFallthrough(&MBB))
700 WaterList.push_back(&MBB);
701
702 for (MachineInstr &I : MBB) {
703 if (I.isDebugValue())
704 continue;
705
706 unsigned Opc = I.getOpcode();
707 if (I.isBranch()) {
708 bool isCond = false;
709 unsigned Bits = 0;
710 unsigned Scale = 1;
711 int UOpc = Opc;
712 switch (Opc) {
713 default:
714 continue; // Ignore other JT branches
715 case ARM::t2BR_JT:
716 case ARM::tBR_JTr:
717 T2JumpTables.push_back(&I);
718 continue; // Does not get an entry in ImmBranches
719 case ARM::Bcc:
720 isCond = true;
721 UOpc = ARM::B;
722 LLVM_FALLTHROUGH;
723 case ARM::B:
724 Bits = 24;
725 Scale = 4;
726 break;
727 case ARM::tBcc:
728 isCond = true;
729 UOpc = ARM::tB;
730 Bits = 8;
731 Scale = 2;
732 break;
733 case ARM::tB:
734 Bits = 11;
735 Scale = 2;
736 break;
737 case ARM::t2Bcc:
738 isCond = true;
739 UOpc = ARM::t2B;
740 Bits = 20;
741 Scale = 2;
742 break;
743 case ARM::t2B:
744 Bits = 24;
745 Scale = 2;
746 break;
747 }
748
749 // Record this immediate branch.
750 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
751 ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
752 }
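A worked example of the MaxOffs formula just above (Bits - 1 because branch displacements are signed): ARM::B with Bits = 24, Scale = 4 gives ((1 << 23) - 1) * 4 = 33554428 bytes, roughly +/-32 MiB, while ARM::tB with Bits = 11, Scale = 2 gives ((1 << 10) - 1) * 2 = 2046 bytes.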
753
754 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
755 PushPopMIs.push_back(&I);
756
757 if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
758 Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
759 Opc == ARM::JUMPTABLE_TBH)
760 continue;
761
762 // Scan the instructions for constant pool operands.
763 for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
764 if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
765 // We found one. The addressing mode tells us the max displacement
766 // from the PC that this instruction permits.
767
768 // Basic size info comes from the TSFlags field.
769 unsigned Bits = 0;
770 unsigned Scale = 1;
771 bool NegOk = false;
772 bool IsSoImm = false;
773
774 switch (Opc) {
775 default:
776 llvm_unreachable("Unknown addressing mode for CP reference!")::llvm::llvm_unreachable_internal("Unknown addressing mode for CP reference!"
, "/build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM/ARMConstantIslandPass.cpp"
, 776)
;
777
778 // Taking the address of a CP entry.
779 case ARM::LEApcrel:
780 case ARM::LEApcrelJT:
781 // This takes a SoImm, which is an 8-bit immediate rotated. We'll
782 // pretend the maximum offset is 255 * 4. Since each instruction is
783 // 4 bytes wide, this is always correct. We'll check for other
784 // displacements that fit in a SoImm as well.
785 Bits = 8;
786 Scale = 4;
787 NegOk = true;
788 IsSoImm = true;
789 break;
790 case ARM::t2LEApcrel:
791 case ARM::t2LEApcrelJT:
792 Bits = 12;
793 NegOk = true;
794 break;
795 case ARM::tLEApcrel:
796 case ARM::tLEApcrelJT:
797 Bits = 8;
798 Scale = 4;
799 break;
800
801 case ARM::LDRBi12:
802 case ARM::LDRi12:
803 case ARM::LDRcp:
804 case ARM::t2LDRpci:
805 case ARM::t2LDRHpci:
806 case ARM::t2LDRBpci:
807 Bits = 12; // +-offset_12
808 NegOk = true;
809 break;
810
811 case ARM::tLDRpci:
812 Bits = 8;
813 Scale = 4; // +(offset_8*4)
814 break;
815
816 case ARM::VLDRD:
817 case ARM::VLDRS:
818 Bits = 8;
819 Scale = 4; // +-(offset_8*4)
820 NegOk = true;
821 break;
822 case ARM::VLDRH:
823 Bits = 8;
824 Scale = 2; // +-(offset_8*2)
825 NegOk = true;
826 break;
827
828 case ARM::tLDRHi:
829 Bits = 5;
830 Scale = 2; // +(offset_5*2)
831 break;
832 }
833
834 // Remember that this is a user of a CP entry.
835 unsigned CPI = I.getOperand(op).getIndex();
836 if (I.getOperand(op).isJTI()) {
837 JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
838 CPI = JumpTableEntryIndices[CPI];
839 }
840
841 MachineInstr *CPEMI = CPEMIs[CPI];
842 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
843 CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
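Note the contrast with the branch formula earlier: here the full Bits is used because negative displacement is modeled by the separate NegOk flag rather than a sign bit. For example, ARM::tLDRpci (Bits = 8, Scale = 4, NegOk = false) gives MaxOffs = ((1 << 8) - 1) * 4 = 1020 bytes.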
844
845 // Increment corresponding CPEntry reference count.
846 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
847 assert(CPE && "Cannot find a corresponding CPEntry!");
848 CPE->RefCount++;
849
850 // Instructions can only use one CP entry, don't bother scanning the
851 // rest of the operands.
852 break;
853 }
854 }
855 }
856}
857
858/// getOffsetOf - Return the current offset of the specified machine instruction
859/// from the start of the function. This offset changes as stuff is moved
860/// around inside the function.
861unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
862 MachineBasicBlock *MBB = MI->getParent();
863
864 // The offset is composed of two things: the sum of the sizes of all MBB's
865 // before this instruction's block, and the offset from the start of the block
866 // it is in.
867 unsigned Offset = BBInfo[MBB->getNumber()].Offset;
868
869 // Sum instructions before MI in MBB.
870 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
871 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
872 Offset += TII->getInstSizeInBytes(*I);
873 }
874 return Offset;
875}
876
877/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
878/// ID.
879static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
880 const MachineBasicBlock *RHS) {
881 return LHS->getNumber() < RHS->getNumber();
882}
883
884/// updateForInsertedWaterBlock - When a block is newly inserted into the
885/// machine function, it upsets all of the block numbers. Renumber the blocks
886/// and update the arrays that parallel this numbering.
887void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
888 // Renumber the MBB's to keep them consecutive.
889 NewBB->getParent()->RenumberBlocks(NewBB);
890
891 // Insert an entry into BBInfo to align it properly with the (newly
892 // renumbered) block numbers.
893 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
894
895 // Next, update WaterList. Specifically, we need to add NewMBB as having
896 // available water after it.
897 water_iterator IP =
898 std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
899 CompareMBBNumbers);
900 WaterList.insert(IP, NewBB);
901}
902
903/// Split the basic block containing MI into two blocks, which are joined by
904/// an unconditional branch. Update data structures and renumber blocks to
905/// account for this change and returns the newly created block.
906MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
907 MachineBasicBlock *OrigBB = MI->getParent();
908
909 // Create a new MBB for the code after the OrigBB.
910 MachineBasicBlock *NewBB =
911 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
912 MachineFunction::iterator MBBI = ++OrigBB->getIterator();
913 MF->insert(MBBI, NewBB);
914
915 // Splice the instructions starting with MI over to NewBB.
916 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
917
918 // Add an unconditional branch from OrigBB to NewBB.
919 // Note the new unconditional branch is not being recorded.
920 // There doesn't seem to be meaningful DebugInfo available; this doesn't
921 // correspond to anything in the source.
922 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
923 if (!isThumb)
924 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
925 else
926 BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
927 .addMBB(NewBB)
928 .add(predOps(ARMCC::AL));
929 ++NumSplit;
930
931 // Update the CFG. All succs of OrigBB are now succs of NewBB.
932 NewBB->transferSuccessors(OrigBB);
933
934 // OrigBB branches to NewBB.
935 OrigBB->addSuccessor(NewBB);
936
937 // Update internal data structures to account for the newly inserted MBB.
938 // This is almost the same as updateForInsertedWaterBlock, except that
939 // the Water goes after OrigBB, not NewBB.
940 MF->RenumberBlocks(NewBB);
941
942 // Insert an entry into BBInfo to align it properly with the (newly
943 // renumbered) block numbers.
944 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
945
946 // Next, update WaterList. Specifically, we need to add OrigMBB as having
947 // available water after it (but not if it's already there, which happens
948 // when splitting before a conditional branch that is followed by an
949 // unconditional branch - in that case we want to insert NewBB).
950 water_iterator IP =
951 std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
952 CompareMBBNumbers);
953 MachineBasicBlock* WaterBB = *IP;
954 if (WaterBB == OrigBB)
955 WaterList.insert(std::next(IP), NewBB);
956 else
957 WaterList.insert(IP, OrigBB);
958 NewWaterList.insert(OrigBB);
959
960 // Figure out how large the OrigBB is. As the first half of the original
961 // block, it cannot contain a tablejump. The size includes
962 // the new jump we added. (It should be possible to do this without
963 // recounting everything, but it's very confusing, and this is rarely
964 // executed.)
965 computeBlockSize(MF, OrigBB, BBInfo[OrigBB->getNumber()]);
966
967 // Figure out how large the NewMBB is. As the second half of the original
968 // block, it may contain a tablejump.
969 computeBlockSize(MF, NewBB, BBInfo[NewBB->getNumber()]);
970
971 // All BBOffsets following these blocks must be modified.
972 adjustBBOffsetsAfter(OrigBB);
973
974 return NewBB;
975}
976
977/// getUserOffset - Compute the offset of U.MI as seen by the hardware
978/// displacement computation. Update U.KnownAlignment to match its current
979/// basic block location.
980unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
981 unsigned UserOffset = getOffsetOf(U.MI);
982 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
983 unsigned KnownBits = BBI.internalKnownBits();
984
985 // The value read from PC is offset from the actual instruction address.
986 UserOffset += (isThumb ? 4 : 8);
987
988 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
989 // Make sure U.getMaxDisp() returns a constrained range.
990 U.KnownAlignment = (KnownBits >= 2);
991
992 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
993 // purposes of the displacement computation; compensate for that here.
994 // For unknown alignments, getMaxDisp() constrains the range instead.
995 if (isThumb && U.KnownAlignment)
996 UserOffset &= ~3u;
997
998 return UserOffset;
999}
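A worked example of the adjustments above (illustrative numbers): a Thumb user at raw offset 0x102 in a block with KnownBits >= 2 is treated as UserOffset = (0x102 + 4) & ~3u = 0x104, matching the hardware's rounding of the PC for displacement purposes.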
1000
1001/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
1002/// reference) is within MaxDisp of TrialOffset (a proposed location of a
1003/// constant pool entry).
1004/// UserOffset is computed by getUserOffset above to include PC adjustments. If
1005/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
1006/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
1007bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
1008 unsigned TrialOffset, unsigned MaxDisp,
1009 bool NegativeOK, bool IsSoImm) {
1010 if (UserOffset <= TrialOffset) {
1011 // User before the Trial.
1012 if (TrialOffset - UserOffset <= MaxDisp)
1013 return true;
1014 // FIXME: Make use of the full range of soimm values.
1015 } else if (NegativeOK) {
1016 if (UserOffset - TrialOffset <= MaxDisp)
1017 return true;
1018 // FIXME: Make use of the full range of soimm values.
1019 }
1020 return false;
1021}
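A quick sanity check with illustrative numbers: UserOffset = 0x100, TrialOffset = 0x500, MaxDisp = 0x400 is in range, since 0x500 - 0x100 = 0x400 <= 0x400; with the two offsets swapped, the same distance is accepted only when NegativeOK is true.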
1022
1023/// isWaterInRange - Returns true if a CPE placed after the specified
1024/// Water (a basic block) will be in range for the specific MI.
1025///
1026/// Compute how much the function will grow by inserting a CPE after Water.
1027bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
1028 MachineBasicBlock* Water, CPUser &U,
1029 unsigned &Growth) {
1030 unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
1031 unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
1032 unsigned NextBlockOffset, NextBlockAlignment;
1033 MachineFunction::const_iterator NextBlock = Water->getIterator();
1034 if (++NextBlock == MF->end()) {
1035 NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1036 NextBlockAlignment = 0;
1037 } else {
1038 NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1039 NextBlockAlignment = NextBlock->getAlignment();
1040 }
1041 unsigned Size = U.CPEMI->getOperand(2).getImm();
1042 unsigned CPEEnd = CPEOffset + Size;
1043
1044 // The CPE may be able to hide in the alignment padding before the next
1045 // block. It may also cause more padding to be required if it is more aligned
1046 // than the next block.
1047 if (CPEEnd > NextBlockOffset) {
1048 Growth = CPEEnd - NextBlockOffset;
1049 // Compute the padding that would go at the end of the CPE to align the next
1050 // block.
1051 Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
1052
1053 // If the CPE is to be inserted before the instruction, that will raise
1054 // the offset of the instruction. Also account for unknown alignment padding
1055 // in blocks between CPE and the user.
1056 if (CPEOffset < UserOffset)
1057 UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
1058 } else
1059 // CPE fits in existing padding.
1060 Growth = 0;
1061
1062 return isOffsetInRange(UserOffset, CPEOffset, U);
1063}
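
OffsetToAlignment(Value, Align) yields the padding needed to reach the next Align-byte boundary, i.e. (Align - Value % Align) % Align. A hedged restatement of the Growth computation with assumed inputs (names are ours, not the pass's):

unsigned growthSketch(unsigned CPEEnd, unsigned NextBlockOffset,
                      unsigned NextBlockLogAlign) {
  if (CPEEnd <= NextBlockOffset)
    return 0; // the CPE hides entirely in existing alignment padding
  unsigned Align = 1u << NextBlockLogAlign;
  unsigned Growth = CPEEnd - NextBlockOffset;  // bytes spilling past the block
  Growth += (Align - CPEEnd % Align) % Align;  // padding to realign next block
  return Growth; // growthSketch(0x114, 0x110, 3) == 8
}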
1064
1065/// isCPEntryInRange - Returns true if the distance between the specified MI
1066/// and the specified constant pool entry can fit in MI's displacement field.
1067bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1068 MachineInstr *CPEMI, unsigned MaxDisp,
1069 bool NegOk, bool DoDump) {
1070 unsigned CPEOffset = getOffsetOf(CPEMI);
1071
1072 if (DoDump) {
1073    DEBUG({
1074      unsigned Block = MI->getParent()->getNumber();
1075      const BasicBlockInfo &BBI = BBInfo[Block];
1076      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1077             << " max delta=" << MaxDisp
1078             << format(" insn address=%#x", UserOffset) << " in "
1079             << printMBBReference(*MI->getParent()) << ": "
1080             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1081             << format("CPE address=%#x offset=%+d: ", CPEOffset,
1082                       int(CPEOffset - UserOffset));
1083    });
1084 }
1085
1086 return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1087}
1088
1089#ifndef NDEBUG
1090/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1091/// unconditionally branches to its only successor.
1092static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1093 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1094 return false;
1095
1096 MachineBasicBlock *Succ = *MBB->succ_begin();
1097 MachineBasicBlock *Pred = *MBB->pred_begin();
1098 MachineInstr *PredMI = &Pred->back();
1099 if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1100 || PredMI->getOpcode() == ARM::t2B)
1101 return PredMI->getOperand(0).getMBB() == Succ;
1102 return false;
1103}
1104#endif // NDEBUG
1105
1106void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
1107 unsigned BBNum = BB->getNumber();
1108 for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
1109 // Get the offset and known bits at the end of the layout predecessor.
1110 // Include the alignment of the current block.
1111 unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
1112 unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
1113 unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
1114
1115    // This is where block i begins. Stop if the offset is already correct
1116    // and we have updated at least two blocks, which is the maximum number
1117    // of blocks that can change before this function is called.
1118 if (i > BBNum + 2 &&
1119 BBInfo[i].Offset == Offset &&
1120 BBInfo[i].KnownBits == KnownBits)
1121 break;
1122
1123 BBInfo[i].Offset = Offset;
1124 BBInfo[i].KnownBits = KnownBits;
1125 }
1126}
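
A worked example with assumed offsets: if BB3 sits at 0x100 and grows from 0x20 to 0x24 bytes, the loop rewrites BB4 from 0x120 to 0x124, BB5 from 0x140 to 0x144, and so on, stopping once a recomputed Offset/KnownBits pair already matches the stored one.

// Before: BB3 @ 0x100 (Size 0x20), BB4 @ 0x120, BB5 @ 0x140  (assumed layout)
// BB3.Size += 4; adjustBBOffsetsAfter(BB3);
// After:  BB4 @ 0x124, BB5 @ 0x144, ... (early exit once values match)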
1127
1128/// decrementCPEReferenceCount - find the constant pool entry with index CPI
1129/// and instruction CPEMI, and decrement its refcount. If the refcount
1130/// becomes 0 remove the entry and instruction. Returns true if we removed
1131/// the entry, false if we didn't.
1132bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1133 MachineInstr *CPEMI) {
1134 // Find the old entry. Eliminate it if it is no longer used.
1135 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1136  assert(CPE && "Unexpected!");
1137 if (--CPE->RefCount == 0) {
1138 removeDeadCPEMI(CPEMI);
1139 CPE->CPEMI = nullptr;
1140 --NumCPEs;
1141 return true;
1142 }
1143 return false;
1144}
1145
1146unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
1147 if (CPEMI->getOperand(1).isCPI())
1148 return CPEMI->getOperand(1).getIndex();
1149
1150 return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
1151}
1152
1153/// findInRangeCPEntry - see if the currently referenced CPE is in range;
1154/// if not, see if an in-range clone of the CPE is in range, and if so,
1155/// change the data structures so the user references the clone. Returns:
1156/// 0 = no existing entry found
1157/// 1 = entry found, and there were no code insertions or deletions
1158/// 2 = entry found, and there were code insertions or deletions
1159int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
1160 MachineInstr *UserMI = U.MI;
1161 MachineInstr *CPEMI = U.CPEMI;
1162
1163 // Check to see if the CPE is already in-range.
1164 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1165 true)) {
1166    DEBUG(dbgs() << "In range\n");
1167 return 1;
1168 }
1169
1170 // No. Look for previously created clones of the CPE that are in range.
1171 unsigned CPI = getCombinedIndex(CPEMI);
1172 std::vector<CPEntry> &CPEs = CPEntries[CPI];
1173 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
1174 // We already tried this one
1175 if (CPEs[i].CPEMI == CPEMI)
1176 continue;
1177 // Removing CPEs can leave empty entries, skip
1178 if (CPEs[i].CPEMI == nullptr)
1179 continue;
1180 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
1181 U.NegOk)) {
1182      DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
1183                   << CPEs[i].CPI << "\n");
1184 // Point the CPUser node to the replacement
1185 U.CPEMI = CPEs[i].CPEMI;
1186 // Change the CPI in the instruction operand to refer to the clone.
1187 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
1188 if (UserMI->getOperand(j).isCPI()) {
1189 UserMI->getOperand(j).setIndex(CPEs[i].CPI);
1190 break;
1191 }
1192 // Adjust the refcount of the clone...
1193 CPEs[i].RefCount++;
1194 // ...and the original. If we didn't remove the old entry, none of the
1195 // addresses changed, so we don't need another pass.
1196 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1197 }
1198 }
1199 return 0;
1200}
1201
1202/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1203/// the specified unconditional branch instruction.
1204static inline unsigned getUnconditionalBrDisp(int Opc) {
1205 switch (Opc) {
1206 case ARM::tB:
1207 return ((1<<10)-1)*2;
1208 case ARM::t2B:
1209 return ((1<<23)-1)*2;
1210 default:
1211 break;
1212 }
1213
1214 return ((1<<23)-1)*4;
1215}
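
These constants follow from the branch encodings: tB carries an 11-bit signed halfword offset, t2B a 24-bit signed halfword offset, and ARM B a 24-bit signed word offset, so the maximum forward displacements check out as:

static_assert(((1 << 10) - 1) * 2 == 2046,     "tB reach: ~2 KiB");
static_assert(((1 << 23) - 1) * 2 == 16777214, "t2B reach: ~16 MiB");
static_assert(((1 << 23) - 1) * 4 == 33554428, "ARM B reach: ~32 MiB");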
1216
1217/// findAvailableWater - Look for an existing entry in the WaterList in which
1218/// we can place the CPE referenced from U so it's within range of U's MI.
1219/// Returns true if found, false if not. If it returns true, WaterIter
1220/// is set to the WaterList entry. For Thumb, prefer water that will not
1221/// introduce padding to water that will. To ensure that this pass
1222/// terminates, the CPE location for a particular CPUser is only allowed to
1223/// move to a lower address, so search backward from the end of the list and
1224/// prefer the first water that is in range.
1225bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1226 water_iterator &WaterIter,
1227 bool CloserWater) {
1228 if (WaterList.empty())
1229 return false;
1230
1231 unsigned BestGrowth = ~0u;
1232 // The nearest water without splitting the UserBB is right after it.
1233  // If the distance is still large (we have a big BB), then we need to split
1234  // it if we don't converge after a certain number of iterations. This helps
1235  // the following situation to converge:
1236 // BB0:
1237 // Big BB
1238 // BB1:
1239 // Constant Pool
1240 // When a CP access is out of range, BB0 may be used as water. However,
1241 // inserting islands between BB0 and BB1 makes other accesses out of range.
1242 MachineBasicBlock *UserBB = U.MI->getParent();
1243 unsigned MinNoSplitDisp =
1244 BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
1245 if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
1246 return false;
1247 for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
1248 --IP) {
1249 MachineBasicBlock* WaterBB = *IP;
1250 // Check if water is in range and is either at a lower address than the
1251 // current "high water mark" or a new water block that was created since
1252 // the previous iteration by inserting an unconditional branch. In the
1253 // latter case, we want to allow resetting the high water mark back to
1254 // this new water since we haven't seen it before. Inserting branches
1255 // should be relatively uncommon and when it does happen, we want to be
1256 // sure to take advantage of it for all the CPEs near that block, so that
1257 // we don't insert more branches than necessary.
1258    // When CloserWater is true, we try to find the lowest address after (or
1259    // equal to) the user MI's BB, regardless of padding growth.
1260 unsigned Growth;
1261 if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1262 (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1263 NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
1264 Growth < BestGrowth) {
1265 // This is the least amount of required padding seen so far.
1266 BestGrowth = Growth;
1267 WaterIter = IP;
1268      DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
1269                   << " Growth=" << Growth << '\n');
1270
1271 if (CloserWater && WaterBB == U.MI->getParent())
1272 return true;
1273 // Keep looking unless it is perfect and we're not looking for the lowest
1274 // possible address.
1275 if (!CloserWater && BestGrowth == 0)
1276 return true;
1277 }
1278 if (IP == B)
1279 break;
1280 }
1281 return BestGrowth != ~0u;
1282}
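
Structurally this is a backward linear scan that keeps the lowest-growth candidate and can exit early on perfect water. A simplified sketch of that shape (our types; InRange stands in for isWaterInRange plus the high-water-mark tests):

#include <vector>

template <typename T, typename InRangeFn>
int findBestBackwardSketch(const std::vector<T> &Water, InRangeFn InRange) {
  int Best = -1;
  unsigned BestGrowth = ~0u;
  for (int I = int(Water.size()) - 1; I >= 0; --I) {
    unsigned Growth;
    if (InRange(Water[I], Growth) && Growth < BestGrowth) {
      BestGrowth = Growth;
      Best = I;
      if (Growth == 0)
        break; // perfect water: no padding would be added
    }
  }
  return Best;
}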
1283
1284/// createNewWater - No existing WaterList entry will work for
1285/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1286/// block is used if in range, and the conditional branch munged so control
1287/// flow is correct. Otherwise the block is split to create a hole with an
1288/// unconditional branch around it. In either case NewMBB is set to a
1289/// block following which the new island can be inserted (the WaterList
1290/// is not adjusted).
1291void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1292 unsigned UserOffset,
1293 MachineBasicBlock *&NewMBB) {
1294 CPUser &U = CPUsers[CPUserIndex];
1295 MachineInstr *UserMI = U.MI;
1296 MachineInstr *CPEMI = U.CPEMI;
1297 unsigned CPELogAlign = getCPELogAlign(CPEMI);
1298 MachineBasicBlock *UserMBB = UserMI->getParent();
1299 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1300
1301 // If the block does not end in an unconditional branch already, and if the
1302 // end of the block is within range, make new water there. (The addition
1303 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1304  // Thumb2, 2 on Thumb1.)
1305 if (BBHasFallthrough(UserMBB)) {
1306 // Size of branch to insert.
1307 unsigned Delta = isThumb1 ? 2 : 4;
1308 // Compute the offset where the CPE will begin.
1309 unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
1310
1311 if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1312      DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
1313                   << format(", expected CPE offset %#x\n", CPEOffset));
1314 NewMBB = &*++UserMBB->getIterator();
1315 // Add an unconditional branch from UserMBB to fallthrough block. Record
1316 // it for branch lengthening; this new branch will not get out of range,
1317 // but if the preceding conditional branch is out of range, the targets
1318 // will be exchanged, and the altered branch may be out of range, so the
1319 // machinery has to know about it.
1320 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1321 if (!isThumb)
1322 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1323 else
1324 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
1325 .addMBB(NewMBB)
1326 .add(predOps(ARMCC::AL));
1327 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1328 ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1329 MaxDisp, false, UncondBr));
1330 computeBlockSize(MF, UserMBB, BBInfo[UserMBB->getNumber()]);
1331 adjustBBOffsetsAfter(UserMBB);
1332 return;
1333 }
1334 }
1335
1336 // What a big block. Find a place within the block to split it. This is a
1337 // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1338 // entries are 4 bytes: if instruction I references island CPE, and
1339 // instruction I+1 references CPE', it will not work well to put CPE as far
1340 // forward as possible, since then CPE' cannot immediately follow it (that
1341 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1342 // need to create a new island. So, we make a first guess, then walk through
1343 // the instructions between the one currently being looked at and the
1344 // possible insertion point, and make sure any other instructions that
1345 // reference CPEs will be able to use the same island area; if not, we back
1346 // up the insertion point.
1347
1348 // Try to split the block so it's fully aligned. Compute the latest split
1349 // point where we can add a 4-byte branch instruction, and then align to
1350 // LogAlign which is the largest possible alignment in the function.
1351 unsigned LogAlign = MF->getAlignment();
1352  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
1353 unsigned KnownBits = UserBBI.internalKnownBits();
1354 unsigned UPad = UnknownPadding(LogAlign, KnownBits);
1355 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
1356  DEBUG(dbgs() << format("Split in middle of big block before %#x",
1357                         BaseInsertOffset));
1358
1359 // The 4 in the following is for the unconditional branch we'll be inserting
1360 // (allows for long branch on Thumb1). Alignment of the island is handled
1361 // inside isOffsetInRange.
1362 BaseInsertOffset -= 4;
1363
1364  DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1365               << " la=" << LogAlign
1366               << " kb=" << KnownBits
1367               << " up=" << UPad << '\n');
1368
1369 // This could point off the end of the block if we've already got constant
1370 // pool entries following this block; only the last one is in the water list.
1371 // Back past any possible branches (allow for a conditional and a maximally
1372 // long unconditional).
1373 if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
1374 // Ensure BaseInsertOffset is larger than the offset of the instruction
1375 // following UserMI so that the loop which searches for the split point
1376 // iterates at least once.
1377 BaseInsertOffset =
1378 std::max(UserBBI.postOffset() - UPad - 8,
1379 UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
1380    DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1381 }
1382 unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1383 CPEMI->getOperand(2).getImm();
1384 MachineBasicBlock::iterator MI = UserMI;
1385 ++MI;
1386 unsigned CPUIndex = CPUserIndex+1;
1387 unsigned NumCPUsers = CPUsers.size();
1388 MachineInstr *LastIT = nullptr;
1389 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1390 Offset < BaseInsertOffset;
1391 Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
1392    assert(MI != UserMBB->end() && "Fell off end of block");
1393 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
1394 CPUser &U = CPUsers[CPUIndex];
1395 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1396        // Shift insertion point by one unit of alignment so it is within reach.
1397 BaseInsertOffset -= 1u << LogAlign;
1398 EndInsertOffset -= 1u << LogAlign;
1399 }
1400 // This is overly conservative, as we don't account for CPEMIs being
1401 // reused within the block, but it doesn't matter much. Also assume CPEs
1402 // are added in order with alignment padding. We may eventually be able
1403 // to pack the aligned CPEs better.
1404 EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1405 CPUIndex++;
1406 }
1407
1408 // Remember the last IT instruction.
1409 if (MI->getOpcode() == ARM::t2IT)
1410 LastIT = &*MI;
1411 }
1412
1413 --MI;
1414
1415 // Avoid splitting an IT block.
1416 if (LastIT) {
1417 unsigned PredReg = 0;
1418 ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
1419 if (CC != ARMCC::AL)
1420 MI = LastIT;
1421 }
1422
1423 // We really must not split an IT block.
1424  DEBUG(unsigned PredReg;
1425        assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
1426
1427 NewMBB = splitBlockBeforeInstr(&*MI);
1428}
1429
1430/// handleConstantPoolUser - Analyze the specified user, checking to see if it
1431/// is out-of-range. If so, pick up the constant pool value and move it some
1432/// place in-range. Return true if we changed any addresses (thus must run
1433/// another pass of branch lengthening), false otherwise.
1434bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
1435 bool CloserWater) {
1436 CPUser &U = CPUsers[CPUserIndex];
1437 MachineInstr *UserMI = U.MI;
1438 MachineInstr *CPEMI = U.CPEMI;
1439 unsigned CPI = getCombinedIndex(CPEMI);
1440 unsigned Size = CPEMI->getOperand(2).getImm();
1441 // Compute this only once, it's expensive.
1442 unsigned UserOffset = getUserOffset(U);
1443
1444 // See if the current entry is within range, or there is a clone of it
1445 // in range.
1446 int result = findInRangeCPEntry(U, UserOffset);
1447 if (result==1) return false;
1448 else if (result==2) return true;
1449
1450 // No existing clone of this CPE is within range.
1451 // We will be generating a new clone. Get a UID for it.
1452 unsigned ID = AFI->createPICLabelUId();
1453
1454 // Look for water where we can place this CPE.
1455 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1456 MachineBasicBlock *NewMBB;
1457 water_iterator IP;
1458 if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
1459    DEBUG(dbgs() << "Found water in range\n");
1460 MachineBasicBlock *WaterBB = *IP;
1461
1462 // If the original WaterList entry was "new water" on this iteration,
1463 // propagate that to the new island. This is just keeping NewWaterList
1464 // updated to match the WaterList, which will be updated below.
1465 if (NewWaterList.erase(WaterBB))
1466 NewWaterList.insert(NewIsland);
1467
1468 // The new CPE goes before the following block (NewMBB).
1469 NewMBB = &*++WaterBB->getIterator();
1470 } else {
1471 // No water found.
1472    DEBUG(dbgs() << "No water found\n");
1473 createNewWater(CPUserIndex, UserOffset, NewMBB);
1474
1475 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1476 // called while handling branches so that the water will be seen on the
1477 // next iteration for constant pools, but in this context, we don't want
1478 // it. Check for this so it will be removed from the WaterList.
1479 // Also remove any entry from NewWaterList.
1480 MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
1481 IP = find(WaterList, WaterBB);
1482 if (IP != WaterList.end())
1483 NewWaterList.erase(WaterBB);
1484
1485 // We are adding new water. Update NewWaterList.
1486 NewWaterList.insert(NewIsland);
1487 }
1488 // Always align the new block because CP entries can be smaller than 4 bytes.
1489 NewMBB->setAlignment(isThumb ? 1 : 2);
1490
1491 // Remove the original WaterList entry; we want subsequent insertions in
1492 // this vicinity to go after the one we're about to insert. This
1493 // considerably reduces the number of times we have to move the same CPE
1494 // more than once and is also important to ensure the algorithm terminates.
1495 if (IP != WaterList.end())
1496 WaterList.erase(IP);
1497
1498 // Okay, we know we can put an island before NewMBB now, do it!
1499 MF->insert(NewMBB->getIterator(), NewIsland);
1500
1501 // Update internal data structures to account for the newly inserted MBB.
1502 updateForInsertedWaterBlock(NewIsland);
1503
1504 // Now that we have an island to add the CPE to, clone the original CPE and
1505 // add it to the island.
1506 U.HighWaterMark = NewIsland;
1507 U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1508 .addImm(ID)
1509 .add(CPEMI->getOperand(1))
1510 .addImm(Size);
1511 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1512 ++NumCPEs;
1513
1514 // Decrement the old entry, and remove it if refcount becomes 0.
1515 decrementCPEReferenceCount(CPI, CPEMI);
1516
1517 // Mark the basic block as aligned as required by the const-pool entry.
1518 NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
1519
1520 // Increase the size of the island block to account for the new entry.
1521 BBInfo[NewIsland->getNumber()].Size += Size;
1522 adjustBBOffsetsAfter(&*--NewIsland->getIterator());
1523
1524 // Finally, change the CPI in the instruction operand to be ID.
1525 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1526 if (UserMI->getOperand(i).isCPI()) {
1527 UserMI->getOperand(i).setIndex(ID);
1528 break;
1529 }
1530
1531  DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1532               << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
1533
1534 return true;
1535}
1536
1537/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1538/// sizes and offsets of impacted basic blocks.
1539void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1540 MachineBasicBlock *CPEBB = CPEMI->getParent();
1541 unsigned Size = CPEMI->getOperand(2).getImm();
1542 CPEMI->eraseFromParent();
1543 BBInfo[CPEBB->getNumber()].Size -= Size;
1544 // All succeeding offsets have the current size value added in, fix this.
1545 if (CPEBB->empty()) {
1546 BBInfo[CPEBB->getNumber()].Size = 0;
1547
1548 // This block no longer needs to be aligned.
1549 CPEBB->setAlignment(0);
1550 } else
1551 // Entries are sorted by descending alignment, so realign from the front.
1552 CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
1553
1554 adjustBBOffsetsAfter(CPEBB);
1555 // An island has only one predecessor BB and one successor BB. Check if
1556 // this BB's predecessor jumps directly to this BB's successor. This
1557 // shouldn't happen currently.
1558  assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1559 // FIXME: remove the empty blocks after all the work is done?
1560}
1561
1562/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1563/// are zero.
1564bool ARMConstantIslands::removeUnusedCPEntries() {
1565  bool MadeChange = false;
1566 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1567 std::vector<CPEntry> &CPEs = CPEntries[i];
1568 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1569 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1570 removeDeadCPEMI(CPEs[j].CPEMI);
1571 CPEs[j].CPEMI = nullptr;
1572 MadeChange = true;
1573 }
1574 }
1575 }
1576 return MadeChange;
1577}
1578
1579/// isBBInRange - Returns true if the distance between the specified MI and
1580/// the specified BB can fit in MI's displacement field.
1581bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
1582 unsigned MaxDisp) {
1583 unsigned PCAdj = isThumb ? 4 : 8;
1584 unsigned BrOffset = getOffsetOf(MI) + PCAdj;
1585 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1586
1587  DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
1588               << " from " << printMBBReference(*MI->getParent())
1589               << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
1590               << " to " << DestOffset << " offset "
1591               << int(DestOffset - BrOffset) << "\t" << *MI);
1592
1593 if (BrOffset <= DestOffset) {
1594 // Branch before the Dest.
1595 if (DestOffset-BrOffset <= MaxDisp)
1596 return true;
1597 } else {
1598 if (BrOffset-DestOffset <= MaxDisp)
1599 return true;
1600 }
1601 return false;
1602}
1603
1604/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1605/// away to fit in its displacement field.
1606bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1607 MachineInstr *MI = Br.MI;
1608 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1609
1610 // Check to see if the DestBB is already in-range.
1611 if (isBBInRange(MI, DestBB, Br.MaxDisp))
1612 return false;
1613
1614 if (!Br.isCond)
1615 return fixupUnconditionalBr(Br);
1616 return fixupConditionalBr(Br);
1617}
1618
1619/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1620/// too far away to fit in its displacement field. If the LR register has been
1621/// spilled in the epilogue, then we can use BL to implement a far jump.
1622/// Otherwise, add an intermediate branch instruction to a branch.
1623bool
1624ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1625 MachineInstr *MI = Br.MI;
1626 MachineBasicBlock *MBB = MI->getParent();
1627 if (!isThumb1)
1628    llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1629
1630 // Use BL to implement far jump.
1631 Br.MaxDisp = (1 << 21) * 2;
1632 MI->setDesc(TII->get(ARM::tBfar));
1633 BBInfo[MBB->getNumber()].Size += 2;
1634 adjustBBOffsetsAfter(MBB);
1635 HasFarJump = true;
1636 ++NumUBrFixed;
1637
1638  DEBUG(dbgs() << " Changed B to long jump " << *MI);
1639
1640 return true;
1641}
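
The new MaxDisp of (1 << 21) * 2 = 4194304 bytes corresponds to the roughly +-4 MiB reach of the 32-bit BL-style pair that tBfar expands to (a hedged reading of the constant). The block grows by 2 because the 2-byte tB becomes a 4-byte instruction.

static_assert((1 << 21) * 2 == 4194304, "tBfar reach: ~4 MiB");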
1642
1643/// fixupConditionalBr - Fix up a conditional branch whose destination is too
1644/// far away to fit in its displacement field. It is converted to an inverse
1645/// conditional branch + an unconditional branch to the destination.
1646bool
1647ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1648 MachineInstr *MI = Br.MI;
1649 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1650
1651 // Add an unconditional branch to the destination and invert the branch
1652 // condition to jump over it:
1653 // blt L1
1654 // =>
1655 // bge L2
1656 // b L1
1657 // L2:
1658 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1659 CC = ARMCC::getOppositeCondition(CC);
1660 unsigned CCReg = MI->getOperand(2).getReg();
1661
1662 // If the branch is at the end of its MBB and that has a fall-through block,
1663 // direct the updated conditional branch to the fall-through block. Otherwise,
1664 // split the MBB before the next instruction.
1665 MachineBasicBlock *MBB = MI->getParent();
1666 MachineInstr *BMI = &MBB->back();
1667 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1668
1669 ++NumCBrFixed;
1670 if (BMI != MI) {
1671 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1672 BMI->getOpcode() == Br.UncondBr) {
1673 // Last MI in the BB is an unconditional branch. Can we simply invert the
1674 // condition and swap destinations:
1675 // beq L1
1676 // b L2
1677 // =>
1678 // bne L2
1679 // b L1
1680 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1681 if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
1682        DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
1683                     << *BMI);
1684 BMI->getOperand(0).setMBB(DestBB);
1685 MI->getOperand(0).setMBB(NewDest);
1686 MI->getOperand(1).setImm(CC);
1687 return true;
1688 }
1689 }
1690 }
1691
1692 if (NeedSplit) {
1693 splitBlockBeforeInstr(MI);
1694 // No need for the branch to the next block. We're adding an unconditional
1695 // branch to the destination.
1696 int delta = TII->getInstSizeInBytes(MBB->back());
1697 BBInfo[MBB->getNumber()].Size -= delta;
1698 MBB->back().eraseFromParent();
1699
1700 // The conditional successor will be swapped between the BBs after this, so
1701 // update CFG.
1702 MBB->addSuccessor(DestBB);
1703 std::next(MBB->getIterator())->removeSuccessor(DestBB);
1704
1705 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1706 }
1707 MachineBasicBlock *NextBB = &*++MBB->getIterator();
1708
1709  DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
1710               << " also invert condition and change dest. to "
1711               << printMBBReference(*NextBB) << "\n");
1712
1713 // Insert a new conditional branch and a new unconditional branch.
1714 // Also update the ImmBranch as well as adding a new entry for the new branch.
1715 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1716 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1717 Br.MI = &MBB->back();
1718 BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
1719 if (isThumb)
1720 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
1721 .addMBB(DestBB)
1722 .add(predOps(ARMCC::AL));
1723 else
1724 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1725 BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
1726 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1727 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1728
1729 // Remove the old conditional branch. It may or may not still be in MBB.
1730 BBInfo[MI->getParent()->getNumber()].Size -= TII->getInstSizeInBytes(*MI);
1731 MI->eraseFromParent();
1732 adjustBBOffsetsAfter(MBB);
1733 return true;
1734}
1735
1736/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
1737/// LR / restore LR to PC. FIXME: This is done here because it's only possible
1738/// to do this if tBfar is not used.
1739bool ARMConstantIslands::undoLRSpillRestore() {
1740 bool MadeChange = false;
1741 for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
1742 MachineInstr *MI = PushPopMIs[i];
1743 // First two operands are predicates.
1744 if (MI->getOpcode() == ARM::tPOP_RET &&
1745 MI->getOperand(2).getReg() == ARM::PC &&
1746 MI->getNumExplicitOperands() == 3) {
1747 // Create the new insn and copy the predicate from the old.
1748 BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
1749 .add(MI->getOperand(0))
1750 .add(MI->getOperand(1));
1751 MI->eraseFromParent();
1752 MadeChange = true;
1753 } else if (MI->getOpcode() == ARM::tPUSH &&
1754 MI->getOperand(2).getReg() == ARM::LR &&
1755 MI->getNumExplicitOperands() == 3) {
1756 // Just remove the push.
1757 MI->eraseFromParent();
1758 MadeChange = true;
1759 }
1760 }
1761 return MadeChange;
1762}
1763
1764bool ARMConstantIslands::optimizeThumb2Instructions() {
1765 bool MadeChange = false;
1766
1767 // Shrink ADR and LDR from constantpool.
1768 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1769 CPUser &U = CPUsers[i];
1770 unsigned Opcode = U.MI->getOpcode();
1771 unsigned NewOpc = 0;
1772 unsigned Scale = 1;
1773 unsigned Bits = 0;
1774 switch (Opcode) {
1775 default: break;
1776 case ARM::t2LEApcrel:
1777 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1778 NewOpc = ARM::tLEApcrel;
1779 Bits = 8;
1780 Scale = 4;
1781 }
1782 break;
1783 case ARM::t2LDRpci:
1784 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1785 NewOpc = ARM::tLDRpci;
1786 Bits = 8;
1787 Scale = 4;
1788 }
1789 break;
1790 }
1791
1792 if (!NewOpc)
1793 continue;
1794
1795 unsigned UserOffset = getUserOffset(U);
1796 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
1797
1798 // Be conservative with inline asm.
1799 if (!U.KnownAlignment)
1800 MaxOffs -= 2;
1801
1802 // FIXME: Check if offset is multiple of scale if scale is not 4.
1803 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1804      DEBUG(dbgs() << "Shrink: " << *U.MI);
1805 U.MI->setDesc(TII->get(NewOpc));
1806 MachineBasicBlock *MBB = U.MI->getParent();
1807 BBInfo[MBB->getNumber()].Size -= 2;
1808 adjustBBOffsetsAfter(MBB);
1809 ++NumT2CPShrunk;
1810 MadeChange = true;
1811 }
1812 }
1813
1814 return MadeChange;
1815}
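
Both narrow forms take an 8-bit immediate scaled by 4, so the nominal reach is ((1 << 8) - 1) * 4 = 1020 bytes; with unknown alignment the code conservatively subtracts 2, leaving 1018.

static_assert(((1 << 8) - 1) * 4 == 1020, "tLEApcrel/tLDRpci nominal reach");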
1816
1817bool ARMConstantIslands::optimizeThumb2Branches() {
1818 bool MadeChange = false;
1819
1820 // The order in which branches appear in ImmBranches is approximately their
1821 // order within the function body. By visiting later branches first, we reduce
1822 // the distance between earlier forward branches and their targets, making it
1823 // more likely that the cbn?z optimization, which can only apply to forward
1824 // branches, will succeed.
1825 for (unsigned i = ImmBranches.size(); i != 0; --i) {
1826 ImmBranch &Br = ImmBranches[i-1];
1827 unsigned Opcode = Br.MI->getOpcode();
1828 unsigned NewOpc = 0;
1829 unsigned Scale = 1;
1830 unsigned Bits = 0;
1831 switch (Opcode) {
1832 default: break;
1833 case ARM::t2B:
1834 NewOpc = ARM::tB;
1835 Bits = 11;
1836 Scale = 2;
1837 break;
1838 case ARM::t2Bcc:
1839 NewOpc = ARM::tBcc;
1840 Bits = 8;
1841 Scale = 2;
1842 break;
1843 }
1844 if (NewOpc) {
1845 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
1846 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1847 if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
1848        DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1849 Br.MI->setDesc(TII->get(NewOpc));
1850 MachineBasicBlock *MBB = Br.MI->getParent();
1851 BBInfo[MBB->getNumber()].Size -= 2;
1852 adjustBBOffsetsAfter(MBB);
1853 ++NumT2BrShrunk;
1854 MadeChange = true;
1855 }
1856 }
1857
1858 Opcode = Br.MI->getOpcode();
1859 if (Opcode != ARM::tBcc)
1860 continue;
1861
1862 // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
1863 // so this transformation is not safe.
1864 if (!Br.MI->killsRegister(ARM::CPSR))
1865 continue;
1866
1867 NewOpc = 0;
1868 unsigned PredReg = 0;
1869 ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
1870 if (Pred == ARMCC::EQ)
1871 NewOpc = ARM::tCBZ;
1872 else if (Pred == ARMCC::NE)
1873 NewOpc = ARM::tCBNZ;
1874 if (!NewOpc)
1875 continue;
1876 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1877 // Check if the distance is within 126. Subtract starting offset by 2
1878 // because the cmp will be eliminated.
1879 unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
1880 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1881 if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
1882 MachineBasicBlock::iterator CmpMI = Br.MI;
1883 if (CmpMI != Br.MI->getParent()->begin()) {
1884 --CmpMI;
1885 if (CmpMI->getOpcode() == ARM::tCMPi8) {
1886 unsigned Reg = CmpMI->getOperand(0).getReg();
1887 Pred = getInstrPredicate(*CmpMI, PredReg);
1888 if (Pred == ARMCC::AL &&
1889 CmpMI->getOperand(1).getImm() == 0 &&
1890 isARMLowRegister(Reg)) {
1891 MachineBasicBlock *MBB = Br.MI->getParent();
1892            DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
1893 MachineInstr *NewBR =
1894 BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
1895 .addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
1896 CmpMI->eraseFromParent();
1897 Br.MI->eraseFromParent();
1898 Br.MI = NewBR;
1899 BBInfo[MBB->getNumber()].Size -= 2;
1900 adjustBBOffsetsAfter(MBB);
1901 ++NumCBZ;
1902 MadeChange = true;
1903 }
1904 }
1905 }
1906 }
1907 }
1908
1909 return MadeChange;
1910}
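
The 126-byte bound matches the CBZ/CBNZ encoding, which zero-extends a 6-bit halfword immediate and therefore reaches 0 to 126 bytes forward only; BrOffset is reduced by 2 because folding deletes the 2-byte tCMPi8.

static_assert(((1 << 6) - 1) * 2 == 126, "tCBZ/tCBNZ forward reach in bytes");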
1911
1912static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
1913 unsigned BaseReg) {
1914 if (I.getOpcode() != ARM::t2ADDrs)
1915 return false;
1916
1917 if (I.getOperand(0).getReg() != EntryReg)
1918 return false;
1919
1920 if (I.getOperand(1).getReg() != BaseReg)
1921 return false;
1922
1923 // FIXME: what about CC and IdxReg?
1924 return true;
1925}
1926
1927/// \brief While trying to form a TBB/TBH instruction, we may (if the table
1928/// doesn't immediately follow the BR_JT) need access to the start of the
1929/// jump-table. We know one instruction that produces such a register; this
1930/// function works out whether that definition can be preserved to the BR_JT,
1931/// possibly by removing an intervening addition (which is usually needed to
1932/// calculate the actual entry to jump to).
1933bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
1934 MachineInstr *LEAMI,
1935 unsigned &DeadSize,
1936 bool &CanDeleteLEA,
1937 bool &BaseRegKill) {
1938 if (JumpMI->getParent() != LEAMI->getParent())
1939 return false;
1940
1941 // Now we hope that we have at least these instructions in the basic block:
1942 // BaseReg = t2LEA ...
1943 // [...]
1944 // EntryReg = t2ADDrs BaseReg, ...
1945 // [...]
1946 // t2BR_JT EntryReg
1947 //
1948 // We have to be very conservative about what we recognise here though. The
1949 // main perturbing factors to watch out for are:
1950 // + Spills at any point in the chain: not direct problems but we would
1951 // expect a blocking Def of the spilled register so in practice what we
1952 // can do is limited.
1953 // + EntryReg == BaseReg: this is the one situation we should allow a Def
1954 // of BaseReg, but only if the t2ADDrs can be removed.
1955 // + Some instruction other than t2ADDrs computing the entry. Not seen in
1956 // the wild, but we should be careful.
1957 unsigned EntryReg = JumpMI->getOperand(0).getReg();
1958 unsigned BaseReg = LEAMI->getOperand(0).getReg();
1959
1960 CanDeleteLEA = true;
1961 BaseRegKill = false;
1962 MachineInstr *RemovableAdd = nullptr;
1963 MachineBasicBlock::iterator I(LEAMI);
1964 for (++I; &*I != JumpMI; ++I) {
1965 if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
1966 RemovableAdd = &*I;
1967 break;
1968 }
1969
1970 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
1971 const MachineOperand &MO = I->getOperand(K);
1972 if (!MO.isReg() || !MO.getReg())
1973 continue;
1974 if (MO.isDef() && MO.getReg() == BaseReg)
1975 return false;
1976 if (MO.isUse() && MO.getReg() == BaseReg) {
1977 BaseRegKill = BaseRegKill || MO.isKill();
1978 CanDeleteLEA = false;
1979 }
1980 }
1981 }
1982
1983 if (!RemovableAdd)
1984 return true;
1985
1986 // Check the add really is removable, and that nothing else in the block
1987 // clobbers BaseReg.
1988 for (++I; &*I != JumpMI; ++I) {
1989 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
1990 const MachineOperand &MO = I->getOperand(K);
1991 if (!MO.isReg() || !MO.getReg())
1992 continue;
1993 if (MO.isDef() && MO.getReg() == BaseReg)
1994 return false;
1995 if (MO.isUse() && MO.getReg() == EntryReg)
1996 RemovableAdd = nullptr;
1997 }
1998 }
1999
2000 if (RemovableAdd) {
2001 RemovableAdd->eraseFromParent();
2002 DeadSize += isThumb2 ? 4 : 2;
2003 } else if (BaseReg == EntryReg) {
2004 // The add wasn't removable, but clobbered the base for the TBB. So we can't
2005 // preserve it.
2006 return false;
2007 }
2008
2009 // We reached the end of the block without seeing another definition of
2010 // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2011 // used in the TBB/TBH if necessary.
2012 return true;
2013}
2014
2015/// \brief Returns whether CPEMI is the first instruction in the block
2016/// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2017/// we can switch the first register to PC and usually remove the address
2018/// calculation that preceded it.
2019static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2020 MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
2021 MachineFunction *MF = MBB->getParent();
2022 ++MBB;
2023
2024 return MBB != MF->end() && MBB->begin() != MBB->end() &&
2025 &*MBB->begin() == CPEMI;
2026}
2027
2028static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
2029 MachineInstr *JumpMI,
2030 unsigned &DeadSize) {
2031 // Remove a dead add between the LEA and JT, which used to compute EntryReg,
2032 // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
2033 // and is not clobbered / used.
2034 MachineInstr *RemovableAdd = nullptr;
2035 unsigned EntryReg = JumpMI->getOperand(0).getReg();
2036
2037 // Find the last ADD to set EntryReg
2038 MachineBasicBlock::iterator I(LEAMI);
2039 for (++I; &*I != JumpMI; ++I) {
2040 if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
2041 RemovableAdd = &*I;
2042 }
2043
2044 if (!RemovableAdd)
2045 return;
2046
2047 // Ensure EntryReg is not clobbered or used.
2048 MachineBasicBlock::iterator J(RemovableAdd);
2049 for (++J; &*J != JumpMI; ++J) {
2050 for (unsigned K = 0, E = J->getNumOperands(); K != E; ++K) {
2051 const MachineOperand &MO = J->getOperand(K);
2052 if (!MO.isReg() || !MO.getReg())
2053 continue;
2054 if (MO.isDef() && MO.getReg() == EntryReg)
2055 return;
2056 if (MO.isUse() && MO.getReg() == EntryReg)
2057 return;
2058 }
2059 }
2060
2061  DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
2062 RemovableAdd->eraseFromParent();
2063 DeadSize += 4;
2064}
2065
2066static bool registerDefinedBetween(unsigned Reg,
2067 MachineBasicBlock::iterator From,
2068 MachineBasicBlock::iterator To,
2069 const TargetRegisterInfo *TRI) {
2070 for (auto I = From; I != To; ++I)
2071 if (I->modifiesRegister(Reg, TRI))
2072 return true;
2073 return false;
2074}
2075
2076/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
2077/// jumptables when it's possible.
2078bool ARMConstantIslands::optimizeThumb2JumpTables() {
2079 bool MadeChange = false;
2080
2081  // FIXME: After the tables are shrunk, can we get rid of some of the
2082 // constantpool tables?
2083 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2084 if (!MJTI) return false;
2085
2086 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2087 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2088 MachineInstr *MI = T2JumpTables[i];
2089 const MCInstrDesc &MCID = MI->getDesc();
2090 unsigned NumOps = MCID.getNumOperands();
2091 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2092 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2093 unsigned JTI = JTOP.getIndex();
2094    assert(JTI < JT.size());
2095
2096 bool ByteOk = true;
2097 bool HalfWordOk = true;
2098 unsigned JTOffset = getOffsetOf(MI) + 4;
2099 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2100 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2101 MachineBasicBlock *MBB = JTBBs[j];
2102 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
2103 // Negative offset is not ok. FIXME: We should change BB layout to make
2104 // sure all the branches are forward.
2105 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
2106 ByteOk = false;
2107 unsigned TBHLimit = ((1<<16)-1)*2;
2108 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
2109 HalfWordOk = false;
2110 if (!ByteOk && !HalfWordOk)
2111 break;
2112 }
2113
2114 if (!ByteOk && !HalfWordOk)
2115 continue;
2116
2117 CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2118 MachineBasicBlock *MBB = MI->getParent();
2119 if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2120 continue;
2121
2122 unsigned DeadSize = 0;
2123 bool CanDeleteLEA = false;
2124 bool BaseRegKill = false;
2125
2126 unsigned IdxReg = ~0U;
2127 bool IdxRegKill = true;
2128 if (isThumb2) {
2129 IdxReg = MI->getOperand(1).getReg();
2130 IdxRegKill = MI->getOperand(1).isKill();
2131
2132 bool PreservedBaseReg =
2133 preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2134 if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2135 continue;
2136 } else {
2137 // We're in thumb-1 mode, so we must have something like:
2138 // %idx = tLSLri %idx, 2
2139 // %base = tLEApcrelJT
2140 // %t = tLDRr %base, %idx
2141 unsigned BaseReg = User.MI->getOperand(0).getReg();
2142
2143 if (User.MI->getIterator() == User.MI->getParent()->begin())
2144 continue;
2145 MachineInstr *Shift = User.MI->getPrevNode();
2146 if (Shift->getOpcode() != ARM::tLSLri ||
2147 Shift->getOperand(3).getImm() != 2 ||
2148 !Shift->getOperand(2).isKill())
2149 continue;
2150 IdxReg = Shift->getOperand(2).getReg();
2151 unsigned ShiftedIdxReg = Shift->getOperand(0).getReg();
2152
2153 // It's important that IdxReg is live until the actual TBB/TBH. Most of
2154 // the range is checked later, but the LEA might still clobber it and not
2155 // actually get removed.
2156 if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
2157 continue;
2158
2159 MachineInstr *Load = User.MI->getNextNode();
2160 if (Load->getOpcode() != ARM::tLDRr)
2161 continue;
2162 if (Load->getOperand(1).getReg() != BaseReg ||
2163 Load->getOperand(2).getReg() != ShiftedIdxReg ||
2164 !Load->getOperand(2).isKill())
2165 continue;
2166
2167 // If we're in PIC mode, there should be another ADD following.
2168 auto *TRI = STI->getRegisterInfo();
2169
2170 // %base cannot be redefined after the load as it will appear before
2171 // TBB/TBH like:
2172 // %base =
2173 // %base =
2174 // tBB %base, %idx
2175 if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
2176 continue;
2177
2178 if (isPositionIndependentOrROPI) {
2179 MachineInstr *Add = Load->getNextNode();
2180 if (Add->getOpcode() != ARM::tADDrr ||
2181 Add->getOperand(2).getReg() != BaseReg ||
2182 Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
2183 !Add->getOperand(3).isKill())
2184 continue;
2185 if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
2186 continue;
2187 if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
2188 // IdxReg gets redefined in the middle of the sequence.
2189 continue;
2190 Add->eraseFromParent();
2191 DeadSize += 2;
2192 } else {
2193 if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
2194 continue;
2195 if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
2196 // IdxReg gets redefined in the middle of the sequence.
2197 continue;
2198 }
2199
2200 // Now safe to delete the load and lsl. The LEA will be removed later.
2201 CanDeleteLEA = true;
2202 Shift->eraseFromParent();
2203 Load->eraseFromParent();
2204 DeadSize += 4;
2205 }
2206
2207 DEBUG(dbgs() << "Shrink JT: " << *MI);
2208 MachineInstr *CPEMI = User.CPEMI;
2209 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2210 if (!isThumb2)
2211 Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;
2212
2213 MachineBasicBlock::iterator MI_JT = MI;
2214 MachineInstr *NewJTMI =
2215 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
2216 .addReg(User.MI->getOperand(0).getReg(),
2217 getKillRegState(BaseRegKill))
2218 .addReg(IdxReg, getKillRegState(IdxRegKill))
2219 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2220 .addImm(CPEMI->getOperand(0).getImm());
2221 DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
2222
2223 unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2224 CPEMI->setDesc(TII->get(JTOpc));
2225
2226 if (jumpTableFollowsTB(MI, User.CPEMI)) {
2227 NewJTMI->getOperand(0).setReg(ARM::PC);
2228 NewJTMI->getOperand(0).setIsKill(false);
2229
2230 if (CanDeleteLEA) {
2231 if (isThumb2)
2232 RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);
2233
2234 User.MI->eraseFromParent();
2235 DeadSize += isThumb2 ? 4 : 2;
2236
2237 // The LEA was eliminated, the TBB instruction becomes the only new user
2238 // of the jump table.
2239 User.MI = NewJTMI;
2240 User.MaxDisp = 4;
2241 User.NegOk = false;
2242 User.IsSoImm = false;
2243 User.KnownAlignment = false;
2244 } else {
2245 // The LEA couldn't be eliminated, so we must add another CPUser to
2246 // record the TBB or TBH use.
2247 int CPEntryIdx = JumpTableEntryIndices[JTI];
2248 auto &CPEs = CPEntries[CPEntryIdx];
2249 auto Entry =
2250 find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
2251 ++Entry->RefCount;
2252 CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2253 }
2254 }
2255
2256 unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
2257 unsigned OrigSize = TII->getInstSizeInBytes(*MI);
2258 MI->eraseFromParent();
2259
2260 int Delta = OrigSize - NewSize + DeadSize;
2261 BBInfo[MBB->getNumber()].Size -= Delta;
2262 adjustBBOffsetsAfter(MBB);
2263
2264 ++NumTBs;
2265 MadeChange = true;
2266 }
2267
2268 return MadeChange;
2269}
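
The ByteOk/HalfWordOk feasibility test above reduces to simple range arithmetic: a TBB entry is an unsigned byte scaled by 2 at run time, and a TBH entry is an unsigned halfword scaled by 2. A minimal standalone sketch of that check (the helper name and vector type are illustrative, not part of the pass):

  #include <cstdint>
  #include <vector>

  // TBB entries are unsigned bytes scaled by 2; TBH entries are unsigned
  // halfwords scaled by 2. These mirror the limits computed in the loop above.
  constexpr uint32_t TBBLimit = ((1u << 8) - 1) * 2;   // 510 bytes
  constexpr uint32_t TBHLimit = ((1u << 16) - 1) * 2;  // 131070 bytes

  // True if every destination is reachable from the table with this limit.
  // Offsets are unsigned, so a destination that precedes the table wraps
  // around and fails the comparison, matching the "negative offset" rejection.
  inline bool fitsInTable(uint32_t JTOffset, const std::vector<uint32_t> &Dsts,
                          uint32_t Limit) {
    for (uint32_t D : Dsts)
      if (D - JTOffset > Limit)
        return false;
    return true;
  }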
2270
2271/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
2272/// jump tables always branch forwards, since that's what tbb and tbh need.
2273bool ARMConstantIslands::reorderThumb2JumpTables() {
2274 bool MadeChange = false;
2275
2276 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2277 if (!MJTI) return false;
2278
2279 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2280 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2281 MachineInstr *MI = T2JumpTables[i];
2282 const MCInstrDesc &MCID = MI->getDesc();
2283 unsigned NumOps = MCID.getNumOperands();
2284 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2285 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2286 unsigned JTI = JTOP.getIndex();
2287 assert(JTI < JT.size());
2288
2289 // We prefer that target blocks for the jump table come after the jump
2290 // instruction so we can use TB[BH]. Loop through the target blocks
2291 // and try to adjust them so that this holds.
2292 int JTNumber = MI->getParent()->getNumber();
2293 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2294 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2295 MachineBasicBlock *MBB = JTBBs[j];
2296 int DTNumber = MBB->getNumber();
2297
2298 if (DTNumber < JTNumber) {
2299 // The destination precedes the switch. Try to move the block forward
2300 // so we have a positive offset.
2301 MachineBasicBlock *NewBB =
2302 adjustJTTargetBlockForward(MBB, MI->getParent());
2303 if (NewBB)
2304 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2305 MadeChange = true;
2306 }
2307 }
2308 }
2309
2310 return MadeChange;
2311}
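
The invariant this pass works toward can be modeled compactly: after reordering, every jump-table target should have a layout number no smaller than the dispatch block's, so all displacements are non-negative. A minimal sketch of that predicate, assuming blocks are identified by their layout numbers as with MachineBasicBlock::getNumber() (the function name is illustrative):

  #include <algorithm>
  #include <vector>

  // TB[BH] needs non-negative displacements, i.e. no target may be laid
  // out before the block holding the jump.
  bool allTargetsForward(int JTNumber, const std::vector<int> &TargetNumbers) {
    return std::all_of(TargetNumbers.begin(), TargetNumbers.end(),
                       [&](int DTNumber) { return DTNumber >= JTNumber; });
  }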
2312
2313MachineBasicBlock *ARMConstantIslands::
2314adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2315 // If the destination block is terminated by an unconditional branch,
2316 // try to move it; otherwise, create a new block following the jump
2317 // table that branches back to the actual target. This is a very simple
2318 // heuristic. FIXME: We can definitely improve it.
2319 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2320 SmallVector<MachineOperand, 4> Cond;
2321 SmallVector<MachineOperand, 4> CondPrior;
2322 MachineFunction::iterator BBi = BB->getIterator();
2323 MachineFunction::iterator OldPrior = std::prev(BBi);
2324
2325 // If the block terminator isn't analyzable, don't try to move the block.
2326 bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
2327
2328 // If the block ends in an unconditional branch, move it. The prior block
2329 // has to have an analyzable terminator for us to move this one. Be paranoid
2330 // and make sure we're not trying to move the entry block of the function.
2331 if (!B && Cond.empty() && BB != &MF->front() &&
2332 !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2333 BB->moveAfter(JTBB);
2334 OldPrior->updateTerminator();
2335 BB->updateTerminator();
2336 // Update numbering to account for the block being moved.
2337 MF->RenumberBlocks();
2338 ++NumJTMoved;
2339 return nullptr;
2340 }
2341
2342 // Create a new MBB for the code after the jump BB.
2343 MachineBasicBlock *NewBB =
2344 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2345 MachineFunction::iterator MBBI = ++JTBB->getIterator();
2346 MF->insert(MBBI, NewBB);
2347
2348 // Add an unconditional branch from NewBB to BB.
2349 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2350 // correspond directly to anything in the source.
2351 if (isThumb2)
2352 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
2353 .addMBB(BB)
2354 .add(predOps(ARMCC::AL));
2355 else
2356 BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
2357 .addMBB(BB)
2358 .add(predOps(ARMCC::AL));
2359
2360 // Update internal data structures to account for the newly inserted MBB.
2361 MF->RenumberBlocks(NewBB);
2362
2363 // Update the CFG.
2364 NewBB->addSuccessor(BB);
2365 JTBB->replaceSuccessor(BB, NewBB);
2366
2367 ++NumJTInserted;
2368 return NewBB;
2369}
2370
2371/// createARMConstantIslandPass - returns an instance of the constpool
2372/// island pass.
2373FunctionPass *llvm::createARMConstantIslandPass() {
2374 return new ARMConstantIslands();
2375}
2376
2377 INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
2378                 false, false)

/build/llvm-toolchain-snapshot-7~svn325874/lib/Target/ARM/ARMBasicBlockInfo.h

1//===-- ARMBasicBlockInfo.h - Basic Block Information -----------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Utility functions and data structure for computing block size.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_ARM_ARMBASICBLOCKINFO_H
15#define LLVM_LIB_TARGET_ARM_ARMBASICBLOCKINFO_H
16
17#include "llvm/Support/MathExtras.h"
18#include <algorithm>
19#include <cstdint>
20
21namespace llvm {
22
23/// UnknownPadding - Return the worst case padding that could result from
24/// unknown offset bits. This does not include alignment padding caused by
25/// known offset bits.
26///
27/// @param LogAlign log2(alignment)
28/// @param KnownBits Number of known low offset bits.
29inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
30 if (KnownBits < LogAlign)
Step 5: Assuming 'KnownBits' is < 'LogAlign'
Step 6: Taking true branch
31 return (1u << LogAlign) - (1u << KnownBits);
Step 7: The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int'
32 return 0;
33}
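
This is the shift the report flags: 1u << LogAlign is only defined for LogAlign in [0, 31], and on the path below the analyzer cannot rule out a value of 32 reaching this point from postOffset(). A defensively hardened variant would make the precondition explicit before shifting; the following is a sketch of that idea, not the upstream fix:

  #include <cassert>

  inline unsigned UnknownPaddingChecked(unsigned LogAlign, unsigned KnownBits) {
    // Shifting a 32-bit value by 32 or more is undefined behavior, so make
    // the precondition explicit instead of relying on callers.
    assert(LogAlign < 32 && "shift amount exceeds the width of unsigned int");
    if (KnownBits < LogAlign)
      return (1u << LogAlign) - (1u << KnownBits);
    return 0;
  }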
34
35/// BasicBlockInfo - Information about the offset and size of a single
36/// basic block.
37struct BasicBlockInfo {
38 /// Offset - Distance from the beginning of the function to the beginning
39 /// of this basic block.
40 ///
41 /// Offsets are computed assuming worst case padding before an aligned
42 /// block. This means that subtracting basic block offsets always gives a
43 /// conservative estimate of the real distance which may be smaller.
44 ///
45 /// Because worst case padding is used, the computed offset of an aligned
46 /// block may not actually be aligned.
47 unsigned Offset = 0;
48
49 /// Size - Size of the basic block in bytes. If the block contains
50 /// inline assembly, this is a worst case estimate.
51 ///
52 /// The size does not include any alignment padding whether from the
53 /// beginning of the block, or from an aligned jump table at the end.
54 unsigned Size = 0;
55
56 /// KnownBits - The number of low bits in Offset that are known to be
57 /// exact. The remaining bits of Offset are an upper bound.
58 uint8_t KnownBits = 0;
59
60 /// Unalign - When non-zero, the block contains instructions (inline asm)
61 /// of unknown size. The real size may be smaller than Size bytes by a
62 /// multiple of 1 << Unalign.
63 uint8_t Unalign = 0;
64
65 /// PostAlign - When non-zero, the block terminator contains a .align
66 /// directive, so the end of the block is aligned to 1 << PostAlign
67 /// bytes.
68 uint8_t PostAlign = 0;
69
70 BasicBlockInfo() = default;
71
72 /// Compute the number of known offset bits internally to this block.
73 /// This number should be used to predict worst case padding when
74 /// splitting the block.
75 unsigned internalKnownBits() const {
76 unsigned Bits = Unalign ? Unalign : KnownBits;
77 // If the block size isn't a multiple of the known bits, assume the
78 // worst case padding.
79 if (Size & ((1u << Bits) - 1))
80 Bits = countTrailingZeros(Size);
81 return Bits;
82 }
83
84 /// Compute the offset immediately following this block. If LogAlign is
85 /// specified, return the offset the successor block will get if it has
86 /// this alignment.
87 unsigned postOffset(unsigned LogAlign = 0) const {
88 unsigned PO = Offset + Size;
89 unsigned LA = std::max(unsigned(PostAlign), LogAlign);
90 if (!LA)
Step 2: Assuming 'LA' is not equal to 0
Step 3: Taking false branch
91 return PO;
92 // Add alignment padding from the terminator.
93 return PO + UnknownPadding(LA, internalKnownBits());
Step 4: Calling 'UnknownPadding'
94 }
95
96 /// Compute the number of known low bits of postOffset. If this block
97 /// contains inline asm, the number of known bits drops to the
98 /// instruction alignment. An aligned terminator may increase the number
99 /// of known bits.
100 /// If LogAlign is given, also consider the alignment of the next block.
101 unsigned postKnownBits(unsigned LogAlign = 0) const {
102 return std::max(std::max(unsigned(PostAlign), LogAlign),
103 internalKnownBits());
104 }
105};
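
A worked example of the arithmetic above, using illustrative values: a 2-byte block at offset 6 whose terminator aligns its successor to 4 bytes (PostAlign = 2), with one exact low offset bit (KnownBits = 1) and no inline asm (Unalign = 0). This is a standalone restatement, not code from the header:

  #include <cassert>

  int main() {
    // Illustrative values for one block; see the field comments above.
    unsigned Offset = 6, Size = 2;   // block occupies [6, 8)
    unsigned KnownBits = 1, PostAlign = 2, Unalign = 0;

    // internalKnownBits(): Size & ((1u << 1) - 1) == 0, so Bits stays 1.
    unsigned Bits = Unalign ? Unalign : KnownBits;

    // postOffset(): PO = 8, LA = 2, UnknownPadding(2, 1) = 4 - 2 = 2.
    unsigned PO = Offset + Size;
    unsigned LA = PostAlign;  // no extra LogAlign requested
    unsigned Pad = (Bits < LA) ? (1u << LA) - (1u << Bits) : 0;

    assert(PO + Pad == 10);  // conservative offset of the next block
    return 0;
  }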
106
107} // end namespace llvm
108
109#endif // LLVM_LIB_TARGET_ARM_ARMBASICBLOCKINFO_H