Bug Summary

File: lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
Warning: line 207, column 5
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64ExpandPseudoInsts.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64 -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp

1//===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains a pass that expands pseudo instructions into target
11// instructions to allow proper scheduling and other late optimizations. This
12// pass should be run after register allocation but before the post-regalloc
13// scheduling pass.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AArch64InstrInfo.h"
18#include "AArch64Subtarget.h"
19#include "MCTargetDesc/AArch64AddressingModes.h"
20#include "Utils/AArch64BaseInfo.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/Triple.h"
23#include "llvm/CodeGen/LivePhysRegs.h"
24#include "llvm/CodeGen/MachineBasicBlock.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineFunctionPass.h"
27#include "llvm/CodeGen/MachineInstr.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineOperand.h"
30#include "llvm/CodeGen/TargetSubtargetInfo.h"
31#include "llvm/IR/DebugLoc.h"
32#include "llvm/MC/MCInstrDesc.h"
33#include "llvm/Pass.h"
34#include "llvm/Support/CodeGen.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Target/TargetMachine.h"
37#include <cassert>
38#include <cstdint>
39#include <iterator>
40#include <limits>
41#include <utility>
42
43using namespace llvm;
44
45#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
46
47namespace {
48
// Machine-function pass that rewrites AArch64 pseudo instructions (immediate
// moves, compare-and-swap pseudos) into sequences of real target instructions
// after register allocation.
49class AArch64ExpandPseudo : public MachineFunctionPass {
50public:
  // Target instruction info used by the expansion helpers; presumably
  // initialized in runOnMachineFunction before any expander runs — TODO
  // confirm (runOnMachineFunction's body is not visible here).
51 const AArch64InstrInfo *TII;
52
  // Pass identification; address used as a unique ID by the pass registry.
53 static char ID;
54
55 AArch64ExpandPseudo() : MachineFunctionPass(ID) {
56 initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
57 }
58
59 bool runOnMachineFunction(MachineFunction &Fn) override;
60
61 StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME"AArch64 pseudo instruction expansion pass"; }
62
63private:
  // Expand all pseudos in one basic block / one instruction respectively.
64 bool expandMBB(MachineBasicBlock &MBB);
65 bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
66 MachineBasicBlock::iterator &NextMBBI);
  // Expand MOVi32imm/MOVi64imm into ORR/MOVZ/MOVN/MOVK sequences.
67 bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
68 unsigned BitSize);
69
  // Expand compare-and-swap pseudos into load-exclusive/store-exclusive loops.
70 bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
71 unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
72 unsigned ExtendImm, unsigned ZeroReg,
73 MachineBasicBlock::iterator &NextMBBI);
74 bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
75 MachineBasicBlock::iterator MBBI,
76 MachineBasicBlock::iterator &NextMBBI);
77};
78
79} // end anonymous namespace
80
81char AArch64ExpandPseudo::ID = 0;
82
83INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
84 AARCH64_EXPAND_PSEUDO_NAME, false, false)
85
86/// \brief Transfer implicit operands on the pseudo instruction to the
87/// instructions created from the expansion.
88static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
89 MachineInstrBuilder &DefMI) {
90 const MCInstrDesc &Desc = OldMI.getDesc();
91 for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
33
Loop condition is true. Entering loop body
92 ++i) {
93 const MachineOperand &MO = OldMI.getOperand(i);
94 assert(MO.isReg() && MO.getReg())(static_cast <bool> (MO.isReg() && MO.getReg())
? void (0) : __assert_fail ("MO.isReg() && MO.getReg()"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 94, __extension__ __PRETTY_FUNCTION__))
;
34
Within the expansion of the macro 'assert':
a
Assuming the condition is true
95 if (MO.isUse())
35
Assuming the condition is false
36
Taking false branch
96 UseMI.add(MO);
97 else
98 DefMI.add(MO);
37
Calling 'MachineInstrBuilder::add'
99 }
100}
101
/// \brief Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  // Chunk 0 is the least significant 16 bits, chunk 3 the most significant.
  const unsigned Shift = ChunkIdx * 16;
  return (Imm >> Shift) & 0xFFFF;
}
109
/// \brief Helper function which replicates a 16-bit chunk within a 64-bit
/// value. Indices correspond to element numbers in a v4i16.
static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
  assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!");
  const unsigned DstShift = ToIdx * 16;
  // Extract the source chunk (same computation as getChunk).
  const uint64_t SrcChunk = (Imm >> (FromIdx * 16)) & 0xFFFF;
  // Clear the destination chunk, then drop the source copy into place.
  return (Imm & ~(0xFFFFULL << DstShift)) | (SrcChunk << DstShift);
}
123
124/// \brief Helper function which tries to materialize a 64-bit value with an
125/// ORR + MOVK instruction sequence.
126static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
127 MachineBasicBlock &MBB,
128 MachineBasicBlock::iterator &MBBI,
129 const AArch64InstrInfo *TII, unsigned ChunkIdx) {
130 assert(ChunkIdx < 4 && "Out of range chunk index specified!")(static_cast <bool> (ChunkIdx < 4 && "Out of range chunk index specified!"
) ? void (0) : __assert_fail ("ChunkIdx < 4 && \"Out of range chunk index specified!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 130, __extension__ __PRETTY_FUNCTION__))
;
131 const unsigned ShiftAmt = ChunkIdx * 16;
132
133 uint64_t Encoding;
134 if (AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding)) {
135 // Create the ORR-immediate instruction.
136 MachineInstrBuilder MIB =
137 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
138 .add(MI.getOperand(0))
139 .addReg(AArch64::XZR)
140 .addImm(Encoding);
141
142 // Create the MOVK instruction.
143 const unsigned Imm16 = getChunk(UImm, ChunkIdx);
144 const unsigned DstReg = MI.getOperand(0).getReg();
145 const bool DstIsDead = MI.getOperand(0).isDead();
146 MachineInstrBuilder MIB1 =
147 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
148 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
149 .addReg(DstReg)
150 .addImm(Imm16)
151 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
152
153 transferImpOps(MI, MIB, MIB1);
154 MI.eraseFromParent();
155 return true;
156 }
157
158 return false;
159}
160
161/// \brief Check whether the given 16-bit chunk replicated to full 64-bit width
162/// can be materialized with an ORR instruction.
163static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
164 Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
165
166 return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
167}
168
169/// \brief Check for identical 16-bit chunks within the constant and if so
170/// materialize them with a single ORR instruction. The remaining one or two
171/// 16-bit chunks will be materialized with MOVK instructions.
172///
173/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
174/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
175/// an ORR instruction.
176static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
177 MachineBasicBlock &MBB,
178 MachineBasicBlock::iterator &MBBI,
179 const AArch64InstrInfo *TII) {
180 using CountMap = DenseMap<uint64_t, unsigned>;
181
182 CountMap Counts;
183
184 // Scan the constant and count how often every chunk occurs.
185 for (unsigned Idx = 0; Idx < 4; ++Idx)
186 ++Counts[getChunk(UImm, Idx)];
187
188 // Traverse the chunks to find one which occurs more than once.
189 for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
190 Chunk != End; ++Chunk) {
191 const uint64_t ChunkVal = Chunk->first;
192 const unsigned Count = Chunk->second;
193
194 uint64_t Encoding = 0;
195
196 // We are looking for chunks which have two or three instances and can be
197 // materialized with an ORR instruction.
198 if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
199 continue;
200
201 const bool CountThree = Count == 3;
202 // Create the ORR-immediate instruction.
203 MachineInstrBuilder MIB =
204 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
205 .add(MI.getOperand(0))
206 .addReg(AArch64::XZR)
207 .addImm(Encoding);
208
209 const unsigned DstReg = MI.getOperand(0).getReg();
210 const bool DstIsDead = MI.getOperand(0).isDead();
211
212 unsigned ShiftAmt = 0;
213 uint64_t Imm16 = 0;
214 // Find the first chunk not materialized with the ORR instruction.
215 for (; ShiftAmt < 64; ShiftAmt += 16) {
216 Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
217
218 if (Imm16 != ChunkVal)
219 break;
220 }
221
222 // Create the first MOVK instruction.
223 MachineInstrBuilder MIB1 =
224 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
225 .addReg(DstReg,
226 RegState::Define | getDeadRegState(DstIsDead && CountThree))
227 .addReg(DstReg)
228 .addImm(Imm16)
229 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
230
231 // In case we have three instances the whole constant is now materialized
232 // and we can exit.
233 if (CountThree) {
234 transferImpOps(MI, MIB, MIB1);
235 MI.eraseFromParent();
236 return true;
237 }
238
239 // Find the remaining chunk which needs to be materialized.
240 for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
241 Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
242
243 if (Imm16 != ChunkVal)
244 break;
245 }
246
247 // Create the second MOVK instruction.
248 MachineInstrBuilder MIB2 =
249 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
250 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
251 .addReg(DstReg)
252 .addImm(Imm16)
253 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
254
255 transferImpOps(MI, MIB, MIB2);
256 MI.eraseFromParent();
257 return true;
258 }
259
260 return false;
261}
262
263/// \brief Check whether this chunk matches the pattern '1...0...'. This pattern
264/// starts a contiguous sequence of ones if we look at the bits from the LSB
265/// towards the MSB.
266static bool isStartChunk(uint64_t Chunk) {
267 if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
268 return false;
269
270 return isMask_64(~Chunk);
271}
272
273/// \brief Check whether this chunk matches the pattern '0...1...' This pattern
274/// ends a contiguous sequence of ones if we look at the bits from the LSB
275/// towards the MSB.
276static bool isEndChunk(uint64_t Chunk) {
277 if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
278 return false;
279
280 return isMask_64(Chunk);
281}
282
/// \brief Clear or set all bits in the chunk at the given index.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  // Mask covering only the 16-bit chunk at position Idx.
  const uint64_t ChunkMask = 0xFFFFULL << (Idx * 16);
  // Either remove the chunk entirely or saturate it to all ones.
  return Clear ? (Imm & ~ChunkMask) : (Imm | ChunkMask);
}
296
297/// \brief Check whether the constant contains a sequence of contiguous ones,
298/// which might be interrupted by one or two chunks. If so, materialize the
299/// sequence of contiguous ones with an ORR instruction.
300/// Materialize the chunks which are either interrupting the sequence or outside
301/// of the sequence with a MOVK instruction.
302///
303/// Assuming S is a chunk which starts the sequence (1...0...), E is a chunk
304/// which ends the sequence (0...1...). Then we are looking for constants which
305/// contain at least one S and E chunk.
306/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
307///
308/// We are also looking for constants like |S|A|B|E| where the contiguous
309/// sequence of ones wraps around the MSB into the LSB.
310static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
311 MachineBasicBlock &MBB,
312 MachineBasicBlock::iterator &MBBI,
313 const AArch64InstrInfo *TII) {
314 const int NotSet = -1;
315 const uint64_t Mask = 0xFFFF;
316
317 int StartIdx = NotSet;
318 int EndIdx = NotSet;
319 // Try to find the chunks which start/end a contiguous sequence of ones.
320 for (int Idx = 0; Idx < 4; ++Idx) {
321 int64_t Chunk = getChunk(UImm, Idx);
322 // Sign extend the 16-bit chunk to 64-bit.
323 Chunk = (Chunk << 48) >> 48;
324
325 if (isStartChunk(Chunk))
326 StartIdx = Idx;
327 else if (isEndChunk(Chunk))
328 EndIdx = Idx;
329 }
330
331 // Early exit in case we can't find a start/end chunk.
332 if (StartIdx == NotSet || EndIdx == NotSet)
333 return false;
334
335 // Outside of the contiguous sequence of ones everything needs to be zero.
336 uint64_t Outside = 0;
337 // Chunks between the start and end chunk need to have all their bits set.
338 uint64_t Inside = Mask;
339
340 // If our contiguous sequence of ones wraps around from the MSB into the LSB,
341 // just swap indices and pretend we are materializing a contiguous sequence
342 // of zeros surrounded by a contiguous sequence of ones.
343 if (StartIdx > EndIdx) {
344 std::swap(StartIdx, EndIdx);
345 std::swap(Outside, Inside);
346 }
347
348 uint64_t OrrImm = UImm;
349 int FirstMovkIdx = NotSet;
350 int SecondMovkIdx = NotSet;
351
352 // Find out which chunks we need to patch up to obtain a contiguous sequence
353 // of ones.
354 for (int Idx = 0; Idx < 4; ++Idx) {
355 const uint64_t Chunk = getChunk(UImm, Idx);
356
357 // Check whether we are looking at a chunk which is not part of the
358 // contiguous sequence of ones.
359 if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
360 OrrImm = updateImm(OrrImm, Idx, Outside == 0);
361
362 // Remember the index we need to patch.
363 if (FirstMovkIdx == NotSet)
364 FirstMovkIdx = Idx;
365 else
366 SecondMovkIdx = Idx;
367
368 // Check whether we are looking a chunk which is part of the contiguous
369 // sequence of ones.
370 } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
371 OrrImm = updateImm(OrrImm, Idx, Inside != Mask);
372
373 // Remember the index we need to patch.
374 if (FirstMovkIdx == NotSet)
375 FirstMovkIdx = Idx;
376 else
377 SecondMovkIdx = Idx;
378 }
379 }
380 assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!")(static_cast <bool> (FirstMovkIdx != NotSet && "Constant materializable with single ORR!"
) ? void (0) : __assert_fail ("FirstMovkIdx != NotSet && \"Constant materializable with single ORR!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 380, __extension__ __PRETTY_FUNCTION__))
;
381
382 // Create the ORR-immediate instruction.
383 uint64_t Encoding = 0;
384 AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
385 MachineInstrBuilder MIB =
386 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
387 .add(MI.getOperand(0))
388 .addReg(AArch64::XZR)
389 .addImm(Encoding);
390
391 const unsigned DstReg = MI.getOperand(0).getReg();
392 const bool DstIsDead = MI.getOperand(0).isDead();
393
394 const bool SingleMovk = SecondMovkIdx == NotSet;
395 // Create the first MOVK instruction.
396 MachineInstrBuilder MIB1 =
397 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
398 .addReg(DstReg,
399 RegState::Define | getDeadRegState(DstIsDead && SingleMovk))
400 .addReg(DstReg)
401 .addImm(getChunk(UImm, FirstMovkIdx))
402 .addImm(
403 AArch64_AM::getShifterImm(AArch64_AM::LSL, FirstMovkIdx * 16));
404
405 // Early exit in case we only need to emit a single MOVK instruction.
406 if (SingleMovk) {
407 transferImpOps(MI, MIB, MIB1);
408 MI.eraseFromParent();
409 return true;
410 }
411
412 // Create the second MOVK instruction.
413 MachineInstrBuilder MIB2 =
414 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
415 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
416 .addReg(DstReg)
417 .addImm(getChunk(UImm, SecondMovkIdx))
418 .addImm(
419 AArch64_AM::getShifterImm(AArch64_AM::LSL, SecondMovkIdx * 16));
420
421 transferImpOps(MI, MIB, MIB2);
422 MI.eraseFromParent();
423 return true;
424}
425
426/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
427/// real move-immediate instructions to synthesize the immediate.
428bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
429 MachineBasicBlock::iterator MBBI,
430 unsigned BitSize) {
431 MachineInstr &MI = *MBBI;
432 unsigned DstReg = MI.getOperand(0).getReg();
433 uint64_t Imm = MI.getOperand(1).getImm();
434 const unsigned Mask = 0xFFFF;
435
436 if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
6
Assuming 'DstReg' is not equal to XZR
7
Assuming 'DstReg' is not equal to WZR
8
Taking false branch
437 // Useless def, and we don't want to risk creating an invalid ORR (which
438 // would really write to sp).
439 MI.eraseFromParent();
440 return true;
441 }
442
443 // Try a MOVI instruction (aka ORR-immediate with the zero register).
444 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
445 uint64_t Encoding;
446 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
9
Taking false branch
447 unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
448 MachineInstrBuilder MIB =
449 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
450 .add(MI.getOperand(0))
451 .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
452 .addImm(Encoding);
453 transferImpOps(MI, MIB, MIB);
454 MI.eraseFromParent();
455 return true;
456 }
457
458 // Scan the immediate and count the number of 16-bit chunks which are either
459 // all ones or all zeros.
460 unsigned OneChunks = 0;
461 unsigned ZeroChunks = 0;
462 for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
10
Loop condition is true. Entering loop body
15
Loop condition is true. Entering loop body
18
Loop condition is false. Execution continues on line 493
463 const unsigned Chunk = (Imm >> Shift) & Mask;
464 if (Chunk == Mask)
11
Assuming 'Chunk' is not equal to 'Mask'
12
Taking false branch
16
Assuming 'Chunk' is equal to 'Mask'
17
Taking true branch
465 OneChunks++;
466 else if (Chunk == 0)
13
Assuming 'Chunk' is not equal to 0
14
Taking false branch
467 ZeroChunks++;
468 }
469
470 // Since we can't materialize the constant with a single ORR instruction,
471 // let's see whether we can materialize 3/4 of the constant with an ORR
472 // instruction and use an additional MOVK instruction to materialize the
473 // remaining 1/4.
474 //
475 // We are looking for constants with a pattern like: |A|X|B|X| or |X|A|X|B|.
476 //
477 // E.g. assuming |A|X|A|X| is a pattern which can be materialized with ORR,
478 // we would create the following instruction sequence:
479 //
480 // ORR x0, xzr, |A|X|A|X|
481 // MOVK x0, |B|, LSL #16
482 //
483 // Only look at 64-bit constants which can't be materialized with a single
484 // instruction e.g. which have less than either three all zero or all one
485 // chunks.
486 //
487 // Ignore 32-bit constants here, they always can be materialized with a
488 // MOVZ/MOVN + MOVK pair. Since the 32-bit constant can't be materialized
489 // with a single ORR, the best sequence we can achieve is a ORR + MOVK pair.
490 // Thus we fall back to the default code below which in the best case creates
491 // a single MOVZ/MOVN instruction (in case one chunk is all zero or all one).
492 //
493 if (BitSize == 64 && OneChunks < 3 && ZeroChunks < 3) {
494 // If we interpret the 64-bit constant as a v4i16, are elements 0 and 2
495 // identical?
496 if (getChunk(UImm, 0) == getChunk(UImm, 2)) {
497 // See if we can come up with a constant which can be materialized with
498 // ORR-immediate by replicating element 3 into element 1.
499 uint64_t OrrImm = replicateChunk(UImm, 3, 1);
500 if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 1))
501 return true;
502
503 // See if we can come up with a constant which can be materialized with
504 // ORR-immediate by replicating element 1 into element 3.
505 OrrImm = replicateChunk(UImm, 1, 3);
506 if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 3))
507 return true;
508
509 // If we interpret the 64-bit constant as a v4i16, are elements 1 and 3
510 // identical?
511 } else if (getChunk(UImm, 1) == getChunk(UImm, 3)) {
512 // See if we can come up with a constant which can be materialized with
513 // ORR-immediate by replicating element 2 into element 0.
514 uint64_t OrrImm = replicateChunk(UImm, 2, 0);
515 if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 0))
516 return true;
517
518 // See if we can come up with a constant which can be materialized with
519 // ORR-immediate by replicating element 1 into element 3.
520 OrrImm = replicateChunk(UImm, 0, 2);
521 if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 2))
522 return true;
523 }
524 }
525
526 // Check for identical 16-bit chunks within the constant and if so materialize
527 // them with a single ORR instruction. The remaining one or two 16-bit chunks
528 // will be materialized with MOVK instructions.
529 if (BitSize == 64 && tryToreplicateChunks(UImm, MI, MBB, MBBI, TII))
530 return true;
531
532 // Check whether the constant contains a sequence of contiguous ones, which
533 // might be interrupted by one or two chunks. If so, materialize the sequence
534 // of contiguous ones with an ORR instruction. Materialize the chunks which
535 // are either interrupting the sequence or outside of the sequence with a
536 // MOVK instruction.
537 if (BitSize == 64 && trySequenceOfOnes(UImm, MI, MBB, MBBI, TII))
538 return true;
539
540 // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
541 // more MOVK instructions to insert additional 16-bit portions into the
542 // lower bits.
543 bool isNeg = false;
544
545 // Use MOVN to materialize the high bits if we have more all one chunks
546 // than all zero chunks.
547 if (OneChunks > ZeroChunks) {
19
Taking true branch
548 isNeg = true;
549 Imm = ~Imm;
550 }
551
552 unsigned FirstOpc;
553 if (BitSize == 32) {
20
Taking true branch
554 Imm &= (1LL << 32) - 1;
555 FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
21
'?' condition is true
556 } else {
557 FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
558 }
559 unsigned Shift = 0; // LSL amount for high bits with MOVZ/MOVN
560 unsigned LastShift = 0; // LSL amount for last MOVK
561 if (Imm != 0) {
22
Assuming 'Imm' is not equal to 0
23
Taking true branch
562 unsigned LZ = countLeadingZeros(Imm);
563 unsigned TZ = countTrailingZeros(Imm);
564 Shift = (TZ / 16) * 16;
565 LastShift = ((63 - LZ) / 16) * 16;
566 }
567 unsigned Imm16 = (Imm >> Shift) & Mask;
568 bool DstIsDead = MI.getOperand(0).isDead();
569 MachineInstrBuilder MIB1 =
570 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
571 .addReg(DstReg, RegState::Define |
572 getDeadRegState(DstIsDead && Shift == LastShift))
24
Assuming 'DstIsDead' is 0
573 .addImm(Imm16)
574 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
575
576 // If a MOVN was used for the high bits of a negative value, flip the rest
577 // of the bits back for use with MOVK.
578 if (isNeg)
25
Taking true branch
579 Imm = ~Imm;
580
581 if (Shift == LastShift) {
26
Taking false branch
582 transferImpOps(MI, MIB1, MIB1);
583 MI.eraseFromParent();
584 return true;
585 }
586
587 MachineInstrBuilder MIB2;
27
Calling defaulted default constructor for 'MachineInstrBuilder'
29
Returning from default constructor for 'MachineInstrBuilder'
588 unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
30
'?' condition is true
589 while (Shift < LastShift) {
31
Loop condition is false. Execution continues on line 603
590 Shift += 16;
591 Imm16 = (Imm >> Shift) & Mask;
592 if (Imm16 == (isNeg ? Mask : 0))
593 continue; // This 16-bit portion is already set correctly.
594 MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
595 .addReg(DstReg,
596 RegState::Define |
597 getDeadRegState(DstIsDead && Shift == LastShift))
598 .addReg(DstReg)
599 .addImm(Imm16)
600 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
601 }
602
603 transferImpOps(MI, MIB1, MIB2);
32
Calling 'transferImpOps'
604 MI.eraseFromParent();
605 return true;
606}
607
608bool AArch64ExpandPseudo::expandCMP_SWAP(
609 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
610 unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
611 MachineBasicBlock::iterator &NextMBBI) {
612 MachineInstr &MI = *MBBI;
613 DebugLoc DL = MI.getDebugLoc();
614 const MachineOperand &Dest = MI.getOperand(0);
615 unsigned StatusReg = MI.getOperand(1).getReg();
616 bool StatusDead = MI.getOperand(1).isDead();
617 // Duplicating undef operands into 2 instructions does not guarantee the same
618 // value on both; However undef should be replaced by xzr anyway.
619 assert(!MI.getOperand(2).isUndef() && "cannot handle undef")(static_cast <bool> (!MI.getOperand(2).isUndef() &&
"cannot handle undef") ? void (0) : __assert_fail ("!MI.getOperand(2).isUndef() && \"cannot handle undef\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 619, __extension__ __PRETTY_FUNCTION__))
;
620 unsigned AddrReg = MI.getOperand(2).getReg();
621 unsigned DesiredReg = MI.getOperand(3).getReg();
622 unsigned NewReg = MI.getOperand(4).getReg();
623
624 MachineFunction *MF = MBB.getParent();
625 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
626 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
627 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
628
629 MF->insert(++MBB.getIterator(), LoadCmpBB);
630 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
631 MF->insert(++StoreBB->getIterator(), DoneBB);
632
633 // .Lloadcmp:
634 // mov wStatus, 0
635 // ldaxr xDest, [xAddr]
636 // cmp xDest, xDesired
637 // b.ne .Ldone
638 if (!StatusDead)
639 BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
640 .addImm(0).addImm(0);
641 BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
642 .addReg(AddrReg);
643 BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
644 .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
645 .addReg(DesiredReg)
646 .addImm(ExtendImm);
647 BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
648 .addImm(AArch64CC::NE)
649 .addMBB(DoneBB)
650 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
651 LoadCmpBB->addSuccessor(DoneBB);
652 LoadCmpBB->addSuccessor(StoreBB);
653
654 // .Lstore:
655 // stlxr wStatus, xNew, [xAddr]
656 // cbnz wStatus, .Lloadcmp
657 BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
658 .addReg(NewReg)
659 .addReg(AddrReg);
660 BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
661 .addReg(StatusReg, getKillRegState(StatusDead))
662 .addMBB(LoadCmpBB);
663 StoreBB->addSuccessor(LoadCmpBB);
664 StoreBB->addSuccessor(DoneBB);
665
666 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
667 DoneBB->transferSuccessors(&MBB);
668
669 MBB.addSuccessor(LoadCmpBB);
670
671 NextMBBI = MBB.end();
672 MI.eraseFromParent();
673
674 // Recompute livein lists.
675 LivePhysRegs LiveRegs;
676 computeAndAddLiveIns(LiveRegs, *DoneBB);
677 computeAndAddLiveIns(LiveRegs, *StoreBB);
678 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
679 // Do an extra pass around the loop to get loop carried registers right.
680 StoreBB->clearLiveIns();
681 computeAndAddLiveIns(LiveRegs, *StoreBB);
682 LoadCmpBB->clearLiveIns();
683 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
684
685 return true;
686}
687
688bool AArch64ExpandPseudo::expandCMP_SWAP_128(
689 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
690 MachineBasicBlock::iterator &NextMBBI) {
691 MachineInstr &MI = *MBBI;
692 DebugLoc DL = MI.getDebugLoc();
693 MachineOperand &DestLo = MI.getOperand(0);
694 MachineOperand &DestHi = MI.getOperand(1);
695 unsigned StatusReg = MI.getOperand(2).getReg();
696 bool StatusDead = MI.getOperand(2).isDead();
697 // Duplicating undef operands into 2 instructions does not guarantee the same
698 // value on both; However undef should be replaced by xzr anyway.
699 assert(!MI.getOperand(3).isUndef() && "cannot handle undef")(static_cast <bool> (!MI.getOperand(3).isUndef() &&
"cannot handle undef") ? void (0) : __assert_fail ("!MI.getOperand(3).isUndef() && \"cannot handle undef\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 699, __extension__ __PRETTY_FUNCTION__))
;
700 unsigned AddrReg = MI.getOperand(3).getReg();
701 unsigned DesiredLoReg = MI.getOperand(4).getReg();
702 unsigned DesiredHiReg = MI.getOperand(5).getReg();
703 unsigned NewLoReg = MI.getOperand(6).getReg();
704 unsigned NewHiReg = MI.getOperand(7).getReg();
705
706 MachineFunction *MF = MBB.getParent();
707 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
708 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
709 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
710
711 MF->insert(++MBB.getIterator(), LoadCmpBB);
712 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
713 MF->insert(++StoreBB->getIterator(), DoneBB);
714
715 // .Lloadcmp:
716 // ldaxp xDestLo, xDestHi, [xAddr]
717 // cmp xDestLo, xDesiredLo
718 // sbcs xDestHi, xDesiredHi
719 // b.ne .Ldone
720 BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
721 .addReg(DestLo.getReg(), RegState::Define)
722 .addReg(DestHi.getReg(), RegState::Define)
723 .addReg(AddrReg);
724 BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
725 .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
726 .addReg(DesiredLoReg)
727 .addImm(0);
728 BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
729 .addUse(AArch64::WZR)
730 .addUse(AArch64::WZR)
731 .addImm(AArch64CC::EQ);
732 BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
733 .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
734 .addReg(DesiredHiReg)
735 .addImm(0);
736 BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
737 .addUse(StatusReg, RegState::Kill)
738 .addUse(StatusReg, RegState::Kill)
739 .addImm(AArch64CC::EQ);
740 BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
741 .addUse(StatusReg, getKillRegState(StatusDead))
742 .addMBB(DoneBB);
743 LoadCmpBB->addSuccessor(DoneBB);
744 LoadCmpBB->addSuccessor(StoreBB);
745
746 // .Lstore:
747 // stlxp wStatus, xNewLo, xNewHi, [xAddr]
748 // cbnz wStatus, .Lloadcmp
749 BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
750 .addReg(NewLoReg)
751 .addReg(NewHiReg)
752 .addReg(AddrReg);
753 BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
754 .addReg(StatusReg, getKillRegState(StatusDead))
755 .addMBB(LoadCmpBB);
756 StoreBB->addSuccessor(LoadCmpBB);
757 StoreBB->addSuccessor(DoneBB);
758
759 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
760 DoneBB->transferSuccessors(&MBB);
761
762 MBB.addSuccessor(LoadCmpBB);
763
764 NextMBBI = MBB.end();
765 MI.eraseFromParent();
766
767 // Recompute liveness bottom up.
768 LivePhysRegs LiveRegs;
769 computeAndAddLiveIns(LiveRegs, *DoneBB);
770 computeAndAddLiveIns(LiveRegs, *StoreBB);
771 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
772 // Do an extra pass in the loop to get the loop carried dependencies right.
773 StoreBB->clearLiveIns();
774 computeAndAddLiveIns(LiveRegs, *StoreBB);
775 LoadCmpBB->clearLiveIns();
776 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
777
778 return true;
779}
780
781/// \brief If MBBI references a pseudo instruction that should be expanded here,
782/// do the expansion and return true. Otherwise return false.
783bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
784 MachineBasicBlock::iterator MBBI,
785 MachineBasicBlock::iterator &NextMBBI) {
786 MachineInstr &MI = *MBBI;
787 unsigned Opcode = MI.getOpcode();
788 switch (Opcode) {
4
Control jumps to 'case MOVi32imm:' at line 937
789 default:
790 break;
791
792 case AArch64::ADDWrr:
793 case AArch64::SUBWrr:
794 case AArch64::ADDXrr:
795 case AArch64::SUBXrr:
796 case AArch64::ADDSWrr:
797 case AArch64::SUBSWrr:
798 case AArch64::ADDSXrr:
799 case AArch64::SUBSXrr:
800 case AArch64::ANDWrr:
801 case AArch64::ANDXrr:
802 case AArch64::BICWrr:
803 case AArch64::BICXrr:
804 case AArch64::ANDSWrr:
805 case AArch64::ANDSXrr:
806 case AArch64::BICSWrr:
807 case AArch64::BICSXrr:
808 case AArch64::EONWrr:
809 case AArch64::EONXrr:
810 case AArch64::EORWrr:
811 case AArch64::EORXrr:
812 case AArch64::ORNWrr:
813 case AArch64::ORNXrr:
814 case AArch64::ORRWrr:
815 case AArch64::ORRXrr: {
816 unsigned Opcode;
817 switch (MI.getOpcode()) {
818 default:
819 return false;
820 case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
821 case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
822 case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
823 case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
824 case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
825 case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
826 case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
827 case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
828 case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
829 case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
830 case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
831 case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
832 case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
833 case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
834 case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
835 case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
836 case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
837 case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
838 case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
839 case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
840 case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
841 case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
842 case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
843 case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
844 }
845 MachineInstrBuilder MIB1 =
846 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
847 MI.getOperand(0).getReg())
848 .add(MI.getOperand(1))
849 .add(MI.getOperand(2))
850 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
851 transferImpOps(MI, MIB1, MIB1);
852 MI.eraseFromParent();
853 return true;
854 }
855
856 case AArch64::LOADgot: {
857 // Expand into ADRP + LDR.
858 unsigned DstReg = MI.getOperand(0).getReg();
859 const MachineOperand &MO1 = MI.getOperand(1);
860 unsigned Flags = MO1.getTargetFlags();
861 MachineInstrBuilder MIB1 =
862 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
863 MachineInstrBuilder MIB2 =
864 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
865 .add(MI.getOperand(0))
866 .addReg(DstReg);
867
868 if (MO1.isGlobal()) {
869 MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
870 MIB2.addGlobalAddress(MO1.getGlobal(), 0,
871 Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
872 } else if (MO1.isSymbol()) {
873 MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
874 MIB2.addExternalSymbol(MO1.getSymbolName(),
875 Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
876 } else {
877 assert(MO1.isCPI() &&(static_cast <bool> (MO1.isCPI() && "Only expect globals, externalsymbols, or constant pools"
) ? void (0) : __assert_fail ("MO1.isCPI() && \"Only expect globals, externalsymbols, or constant pools\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 878, __extension__ __PRETTY_FUNCTION__))
878 "Only expect globals, externalsymbols, or constant pools")(static_cast <bool> (MO1.isCPI() && "Only expect globals, externalsymbols, or constant pools"
) ? void (0) : __assert_fail ("MO1.isCPI() && \"Only expect globals, externalsymbols, or constant pools\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp"
, 878, __extension__ __PRETTY_FUNCTION__))
;
879 MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
880 Flags | AArch64II::MO_PAGE);
881 MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
882 Flags | AArch64II::MO_PAGEOFF |
883 AArch64II::MO_NC);
884 }
885
886 transferImpOps(MI, MIB1, MIB2);
887 MI.eraseFromParent();
888 return true;
889 }
890
891 case AArch64::MOVaddr:
892 case AArch64::MOVaddrJT:
893 case AArch64::MOVaddrCP:
894 case AArch64::MOVaddrBA:
895 case AArch64::MOVaddrTLS:
896 case AArch64::MOVaddrEXT: {
897 // Expand into ADRP + ADD.
898 unsigned DstReg = MI.getOperand(0).getReg();
899 MachineInstrBuilder MIB1 =
900 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
901 .add(MI.getOperand(1));
902
903 MachineInstrBuilder MIB2 =
904 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
905 .add(MI.getOperand(0))
906 .addReg(DstReg)
907 .add(MI.getOperand(2))
908 .addImm(0);
909
910 transferImpOps(MI, MIB1, MIB2);
911 MI.eraseFromParent();
912 return true;
913 }
914 case AArch64::ADDlowTLS:
915 // Produce a plain ADD
916 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
917 .add(MI.getOperand(0))
918 .add(MI.getOperand(1))
919 .add(MI.getOperand(2))
920 .addImm(0);
921 MI.eraseFromParent();
922 return true;
923
924 case AArch64::MOVbaseTLS: {
925 unsigned DstReg = MI.getOperand(0).getReg();
926 auto SysReg = AArch64SysReg::TPIDR_EL0;
927 MachineFunction *MF = MBB.getParent();
928 if (MF->getTarget().getTargetTriple().isOSFuchsia() &&
929 MF->getTarget().getCodeModel() == CodeModel::Kernel)
930 SysReg = AArch64SysReg::TPIDR_EL1;
931 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
932 .addImm(SysReg);
933 MI.eraseFromParent();
934 return true;
935 }
936
937 case AArch64::MOVi32imm:
938 return expandMOVImm(MBB, MBBI, 32);
5
Calling 'AArch64ExpandPseudo::expandMOVImm'
939 case AArch64::MOVi64imm:
940 return expandMOVImm(MBB, MBBI, 64);
941 case AArch64::RET_ReallyLR: {
942 // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
943 // function and missing live-ins. We are fine in practice because callee
944 // saved register handling ensures the register value is restored before
945 // RET, but we need the undef flag here to appease the MachineVerifier
946 // liveness checks.
947 MachineInstrBuilder MIB =
948 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
949 .addReg(AArch64::LR, RegState::Undef);
950 transferImpOps(MI, MIB, MIB);
951 MI.eraseFromParent();
952 return true;
953 }
954 case AArch64::CMP_SWAP_8:
955 return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
956 AArch64::SUBSWrx,
957 AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
958 AArch64::WZR, NextMBBI);
959 case AArch64::CMP_SWAP_16:
960 return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
961 AArch64::SUBSWrx,
962 AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
963 AArch64::WZR, NextMBBI);
964 case AArch64::CMP_SWAP_32:
965 return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
966 AArch64::SUBSWrs,
967 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
968 AArch64::WZR, NextMBBI);
969 case AArch64::CMP_SWAP_64:
970 return expandCMP_SWAP(MBB, MBBI,
971 AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
972 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
973 AArch64::XZR, NextMBBI);
974 case AArch64::CMP_SWAP_128:
975 return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
976
977 case AArch64::AESMCrrTied:
978 case AArch64::AESIMCrrTied: {
979 MachineInstrBuilder MIB =
980 BuildMI(MBB, MBBI, MI.getDebugLoc(),
981 TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
982 AArch64::AESIMCrr))
983 .add(MI.getOperand(0))
984 .add(MI.getOperand(1));
985 transferImpOps(MI, MIB, MIB);
986 MI.eraseFromParent();
987 return true;
988 }
989 }
990 return false;
991}
992
993/// \brief Iterate over the instructions in basic block MBB and expand any
994/// pseudo instructions. Return true if anything was modified.
995bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
996 bool Modified = false;
997
998 MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
999 while (MBBI != E) {
2
Loop condition is true. Entering loop body
1000 MachineBasicBlock::iterator NMBBI = std::next(MBBI);
1001 Modified |= expandMI(MBB, MBBI, NMBBI);
3
Calling 'AArch64ExpandPseudo::expandMI'
1002 MBBI = NMBBI;
1003 }
1004
1005 return Modified;
1006}
1007
1008bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
1009 TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
1010
1011 bool Modified = false;
1012 for (auto &MBB : MF)
1013 Modified |= expandMBB(MBB);
1
Calling 'AArch64ExpandPseudo::expandMBB'
1014 return Modified;
1015}
1016
1017/// \brief Returns an instance of the pseudo instruction expansion pass.
1018FunctionPass *llvm::createAArch64ExpandPseudoPass() {
1019 return new AArch64ExpandPseudo();
1020}

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h

1//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes a function named BuildMI, which is useful for dramatically
11// simplifying how MachineInstr's are created. It allows use of code like this:
12//
13// M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
14// .addReg(argVal1)
15// .addReg(argVal2);
16//
17//===----------------------------------------------------------------------===//
18
19#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
20#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
21
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/CodeGen/GlobalISel/Utils.h"
24#include "llvm/CodeGen/MachineBasicBlock.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineInstr.h"
27#include "llvm/CodeGen/MachineInstrBundle.h"
28#include "llvm/CodeGen/MachineOperand.h"
29#include "llvm/CodeGen/TargetRegisterInfo.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/Support/ErrorHandling.h"
33#include <cassert>
34#include <cstdint>
35#include <utility>
36
37namespace llvm {
38
39class MCInstrDesc;
40class MDNode;
41
namespace RegState {

  /// Per-operand register flags, OR-able into the 'flags' argument of
  /// MachineInstrBuilder::addReg.  Bit 0x1 is deliberately unused so that a
  /// stray boolean 'true' passed as flags trips the assert in addReg.
  enum {
    Define        = 0x2,   // Operand is a def.
    Implicit      = 0x4,   // Operand is implicit (not encoded).
    Kill          = 0x8,   // Last use of the register.
    Dead          = 0x10,  // Def whose value is never used.
    Undef         = 0x20,  // Value read is undefined.
    EarlyClobber  = 0x40,  // Def written before all uses are read.
    Debug         = 0x80,  // Operand belongs to a debug instruction.
    InternalRead  = 0x100, // Reads a value defined inside the same bundle.
    Renamable     = 0x200, // Register may be renamed freely.
    // Common combinations.
    DefineNoRead    = Define | Undef,
    ImplicitDefine  = Implicit | Define,
    ImplicitKill    = Implicit | Kill
  };

} // end namespace RegState
60
61class MachineInstrBuilder {
62 MachineFunction *MF = nullptr;
63 MachineInstr *MI = nullptr;
28
Null pointer value stored to 'MIB2.MI'
64
65public:
66 MachineInstrBuilder() = default;
67
68 /// Create a MachineInstrBuilder for manipulating an existing instruction.
69 /// F must be the machine function that was used to allocate I.
70 MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
71 MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
72 : MF(&F), MI(&*I) {}
73
74 /// Allow automatic conversion to the machine instruction we are working on.
75 operator MachineInstr*() const { return MI; }
76 MachineInstr *operator->() const { return MI; }
77 operator MachineBasicBlock::iterator() const { return MI; }
78
79 /// If conversion operators fail, use this method to get the MachineInstr
80 /// explicitly.
81 MachineInstr *getInstr() const { return MI; }
82
83 /// Add a new virtual register operand.
84 const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
85 unsigned SubReg = 0) const {
86 assert((flags & 0x1) == 0 &&(static_cast <bool> ((flags & 0x1) == 0 && "Passing in 'true' to addReg is forbidden! Use enums instead."
) ? void (0) : __assert_fail ("(flags & 0x1) == 0 && \"Passing in 'true' to addReg is forbidden! Use enums instead.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 87, __extension__ __PRETTY_FUNCTION__))
87 "Passing in 'true' to addReg is forbidden! Use enums instead.")(static_cast <bool> ((flags & 0x1) == 0 && "Passing in 'true' to addReg is forbidden! Use enums instead."
) ? void (0) : __assert_fail ("(flags & 0x1) == 0 && \"Passing in 'true' to addReg is forbidden! Use enums instead.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 87, __extension__ __PRETTY_FUNCTION__))
;
88 MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
89 flags & RegState::Define,
90 flags & RegState::Implicit,
91 flags & RegState::Kill,
92 flags & RegState::Dead,
93 flags & RegState::Undef,
94 flags & RegState::EarlyClobber,
95 SubReg,
96 flags & RegState::Debug,
97 flags & RegState::InternalRead,
98 flags & RegState::Renamable));
99 return *this;
100 }
101
102 /// Add a virtual register definition operand.
103 const MachineInstrBuilder &addDef(unsigned RegNo, unsigned Flags = 0,
104 unsigned SubReg = 0) const {
105 return addReg(RegNo, Flags | RegState::Define, SubReg);
106 }
107
108 /// Add a virtual register use operand. It is an error for Flags to contain
109 /// `RegState::Define` when calling this function.
110 const MachineInstrBuilder &addUse(unsigned RegNo, unsigned Flags = 0,
111 unsigned SubReg = 0) const {
112 assert(!(Flags & RegState::Define) &&(static_cast <bool> (!(Flags & RegState::Define) &&
"Misleading addUse defines register, use addReg instead.") ?
void (0) : __assert_fail ("!(Flags & RegState::Define) && \"Misleading addUse defines register, use addReg instead.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 113, __extension__ __PRETTY_FUNCTION__))
113 "Misleading addUse defines register, use addReg instead.")(static_cast <bool> (!(Flags & RegState::Define) &&
"Misleading addUse defines register, use addReg instead.") ?
void (0) : __assert_fail ("!(Flags & RegState::Define) && \"Misleading addUse defines register, use addReg instead.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 113, __extension__ __PRETTY_FUNCTION__))
;
114 return addReg(RegNo, Flags, SubReg);
115 }
116
117 /// Add a new immediate operand.
118 const MachineInstrBuilder &addImm(int64_t Val) const {
119 MI->addOperand(*MF, MachineOperand::CreateImm(Val));
120 return *this;
121 }
122
123 const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
124 MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
125 return *this;
126 }
127
128 const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
129 MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
130 return *this;
131 }
132
133 const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
134 unsigned char TargetFlags = 0) const {
135 MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
136 return *this;
137 }
138
139 const MachineInstrBuilder &addFrameIndex(int Idx) const {
140 MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
141 return *this;
142 }
143
144 const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
145 int Offset = 0,
146 unsigned char TargetFlags = 0) const {
147 MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
148 return *this;
149 }
150
151 const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
152 unsigned char TargetFlags = 0) const {
153 MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
154 TargetFlags));
155 return *this;
156 }
157
158 const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
159 unsigned char TargetFlags = 0) const {
160 MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
161 return *this;
162 }
163
164 const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
165 int64_t Offset = 0,
166 unsigned char TargetFlags = 0) const {
167 MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
168 return *this;
169 }
170
171 const MachineInstrBuilder &addExternalSymbol(const char *FnName,
172 unsigned char TargetFlags = 0) const {
173 MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
174 return *this;
175 }
176
177 const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
178 int64_t Offset = 0,
179 unsigned char TargetFlags = 0) const {
180 MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
181 return *this;
182 }
183
184 const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
185 MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
186 return *this;
187 }
188
189 const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
190 MI->addMemOperand(*MF, MMO);
191 return *this;
192 }
193
194 const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
195 MachineInstr::mmo_iterator e) const {
196 MI->setMemRefs(b, e);
197 return *this;
198 }
199
200 const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
201 unsigned> MemOperandsRef) const {
202 MI->setMemRefs(MemOperandsRef);
203 return *this;
204 }
205
206 const MachineInstrBuilder &add(const MachineOperand &MO) const {
207 MI->addOperand(*MF, MO);
38
Called C++ object pointer is null
208 return *this;
209 }
210
211 const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
212 for (const MachineOperand &MO : MOs) {
213 MI->addOperand(*MF, MO);
214 }
215 return *this;
216 }
217
218 const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
219 MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
220 assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())(static_cast <bool> ((MI->isDebugValue() ? static_cast
<bool>(MI->getDebugVariable()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? void (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 222, __extension__ __PRETTY_FUNCTION__))
221 : true) &&(static_cast <bool> ((MI->isDebugValue() ? static_cast
<bool>(MI->getDebugVariable()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? void (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 222, __extension__ __PRETTY_FUNCTION__))
222 "first MDNode argument of a DBG_VALUE not a variable")(static_cast <bool> ((MI->isDebugValue() ? static_cast
<bool>(MI->getDebugVariable()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? void (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 222, __extension__ __PRETTY_FUNCTION__))
;
223 return *this;
224 }
225
226 const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
227 MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
228 return *this;
229 }
230
231 const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
232 MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
233 return *this;
234 }
235
236 const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
237 MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
238 return *this;
239 }
240
241 const MachineInstrBuilder &addSym(MCSymbol *Sym,
242 unsigned char TargetFlags = 0) const {
243 MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
244 return *this;
245 }
246
247 const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
248 MI->setFlags(Flags);
249 return *this;
250 }
251
252 const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
253 MI->setFlag(Flag);
254 return *this;
255 }
256
257 // Add a displacement from an existing MachineOperand with an added offset.
258 const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
259 unsigned char TargetFlags = 0) const {
260 // If caller specifies new TargetFlags then use it, otherwise the
261 // default behavior is to copy the target flags from the existing
262 // MachineOperand. This means if the caller wants to clear the
263 // target flags it needs to do so explicitly.
264 if (0 == TargetFlags)
265 TargetFlags = Disp.getTargetFlags();
266
267 switch (Disp.getType()) {
268 default:
269 llvm_unreachable("Unhandled operand type in addDisp()")::llvm::llvm_unreachable_internal("Unhandled operand type in addDisp()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 269)
;
270 case MachineOperand::MO_Immediate:
271 return addImm(Disp.getImm() + off);
272 case MachineOperand::MO_ConstantPoolIndex:
273 return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
274 TargetFlags);
275 case MachineOperand::MO_GlobalAddress:
276 return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
277 TargetFlags);
278 }
279 }
280
281 /// Copy all the implicit operands from OtherMI onto this one.
282 const MachineInstrBuilder &
283 copyImplicitOps(const MachineInstr &OtherMI) const {
284 MI->copyImplicitOps(*MF, OtherMI);
285 return *this;
286 }
287
288 bool constrainAllUses(const TargetInstrInfo &TII,
289 const TargetRegisterInfo &TRI,
290 const RegisterBankInfo &RBI) const {
291 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
292 }
293};
294
295/// Builder interface. Specify how to create the initial instruction itself.
296inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
297 const MCInstrDesc &MCID) {
298 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
299}
300
301/// This version of the builder sets up the first operand as a
302/// destination virtual register.
303inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
304 const MCInstrDesc &MCID, unsigned DestReg) {
305 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
306 .addReg(DestReg, RegState::Define);
307}
308
309/// This version of the builder inserts the newly-built instruction before
310/// the given position in the given MachineBasicBlock, and sets up the first
311/// operand as a destination virtual register.
312inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
313 MachineBasicBlock::iterator I,
314 const DebugLoc &DL, const MCInstrDesc &MCID,
315 unsigned DestReg) {
316 MachineFunction &MF = *BB.getParent();
317 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
318 BB.insert(I, MI);
319 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
320}
321
322/// This version of the builder inserts the newly-built instruction before
323/// the given position in the given MachineBasicBlock, and sets up the first
324/// operand as a destination virtual register.
325///
326/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
327/// added to the same bundle.
328inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
329 MachineBasicBlock::instr_iterator I,
330 const DebugLoc &DL, const MCInstrDesc &MCID,
331 unsigned DestReg) {
332 MachineFunction &MF = *BB.getParent();
333 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
334 BB.insert(I, MI);
335 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
336}
337
338inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
339 const DebugLoc &DL, const MCInstrDesc &MCID,
340 unsigned DestReg) {
341 // Calling the overload for instr_iterator is always correct. However, the
342 // definition is not available in headers, so inline the check.
343 if (I.isInsideBundle())
344 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
345 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
346}
347
348inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
349 const DebugLoc &DL, const MCInstrDesc &MCID,
350 unsigned DestReg) {
351 return BuildMI(BB, *I, DL, MCID, DestReg);
352}
353
354/// This version of the builder inserts the newly-built instruction before the
355/// given position in the given MachineBasicBlock, and does NOT take a
356/// destination register.
357inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
358 MachineBasicBlock::iterator I,
359 const DebugLoc &DL,
360 const MCInstrDesc &MCID) {
361 MachineFunction &MF = *BB.getParent();
362 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
363 BB.insert(I, MI);
364 return MachineInstrBuilder(MF, MI);
365}
366
367inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
368 MachineBasicBlock::instr_iterator I,
369 const DebugLoc &DL,
370 const MCInstrDesc &MCID) {
371 MachineFunction &MF = *BB.getParent();
372 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
373 BB.insert(I, MI);
374 return MachineInstrBuilder(MF, MI);
375}
376
377inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
378 const DebugLoc &DL,
379 const MCInstrDesc &MCID) {
380 // Calling the overload for instr_iterator is always correct. However, the
381 // definition is not available in headers, so inline the check.
382 if (I.isInsideBundle())
383 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
384 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
385}
386
387inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
388 const DebugLoc &DL,
389 const MCInstrDesc &MCID) {
390 return BuildMI(BB, *I, DL, MCID);
391}
392
393/// This version of the builder inserts the newly-built instruction at the end
394/// of the given MachineBasicBlock, and does NOT take a destination register.
395inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
396 const MCInstrDesc &MCID) {
397 return BuildMI(*BB, BB->end(), DL, MCID);
398}
399
400/// This version of the builder inserts the newly-built instruction at the
401/// end of the given MachineBasicBlock, and sets up the first operand as a
402/// destination virtual register.
403inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
404 const MCInstrDesc &MCID, unsigned DestReg) {
405 return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
406}
407
408/// This version of the builder builds a DBG_VALUE intrinsic
409/// for either a value in a register or a register-indirect
410/// address. The convention is that a DBG_VALUE is indirect iff the
411/// second operand is an immediate.
412MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
413 const MCInstrDesc &MCID, bool IsIndirect,
414 unsigned Reg, const MDNode *Variable,
415 const MDNode *Expr);
416
/// This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect
/// address and inserts it at position I.
///
/// \p BB is the basic block that receives the new DBG_VALUE.
MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            const MCInstrDesc &MCID, bool IsIndirect,
                            unsigned Reg, const MDNode *Variable,
                            const MDNode *Expr);
425
/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
///
/// \p Orig is the DBG_VALUE being cloned (taken by const reference, so it is
/// not modified); the clone goes into \p BB at position \p I and is returned.
MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    const MachineInstr &Orig, int FrameIndex);
430
/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
/// modifying an instruction in place while iterating over a basic block.
///
/// Unlike buildDbgValueForSpill, this mutates \p Orig itself (non-const
/// reference) instead of creating a new instruction.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex);
434
435inline unsigned getDefRegState(bool B) {
436 return B ? RegState::Define : 0;
437}
438inline unsigned getImplRegState(bool B) {
439 return B ? RegState::Implicit : 0;
440}
441inline unsigned getKillRegState(bool B) {
442 return B ? RegState::Kill : 0;
443}
444inline unsigned getDeadRegState(bool B) {
445 return B ? RegState::Dead : 0;
446}
447inline unsigned getUndefRegState(bool B) {
448 return B ? RegState::Undef : 0;
449}
450inline unsigned getInternalReadRegState(bool B) {
451 return B ? RegState::InternalRead : 0;
452}
453inline unsigned getDebugRegState(bool B) {
454 return B ? RegState::Debug : 0;
455}
456inline unsigned getRenamableRegState(bool B) {
457 return B ? RegState::Renamable : 0;
458}
459
460/// Get all register state flags from machine operand \p RegOp.
461inline unsigned getRegState(const MachineOperand &RegOp) {
462 assert(RegOp.isReg() && "Not a register operand")(static_cast <bool> (RegOp.isReg() && "Not a register operand"
) ? void (0) : __assert_fail ("RegOp.isReg() && \"Not a register operand\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 462, __extension__ __PRETTY_FUNCTION__))
;
463 return getDefRegState(RegOp.isDef()) |
464 getImplRegState(RegOp.isImplicit()) |
465 getKillRegState(RegOp.isKill()) |
466 getDeadRegState(RegOp.isDead()) |
467 getUndefRegState(RegOp.isUndef()) |
468 getInternalReadRegState(RegOp.isInternalRead()) |
469 getDebugRegState(RegOp.isDebug()) |
470 getRenamableRegState(
471 TargetRegisterInfo::isPhysicalRegister(RegOp.getReg()) &&
472 RegOp.isRenamable());
473}
474
475/// Helper class for constructing bundles of MachineInstrs.
476///
477/// MIBundleBuilder can create a bundle from scratch by inserting new
478/// MachineInstrs one at a time, or it can create a bundle from a sequence of
479/// existing MachineInstrs in a basic block.
480class MIBundleBuilder {
481 MachineBasicBlock &MBB;
482 MachineBasicBlock::instr_iterator Begin;
483 MachineBasicBlock::instr_iterator End;
484
485public:
486 /// Create an MIBundleBuilder that inserts instructions into a new bundle in
487 /// BB above the bundle or instruction at Pos.
488 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
489 : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
490
491 /// Create a bundle from the sequence of instructions between B and E.
492 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
493 MachineBasicBlock::iterator E)
494 : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
495 assert(B != E && "No instructions to bundle")(static_cast <bool> (B != E && "No instructions to bundle"
) ? void (0) : __assert_fail ("B != E && \"No instructions to bundle\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineInstrBuilder.h"
, 495, __extension__ __PRETTY_FUNCTION__))
;
496 ++B;
497 while (B != E) {
498 MachineInstr &MI = *B;
499 ++B;
500 MI.bundleWithPred();
501 }
502 }
503
504 /// Create an MIBundleBuilder representing an existing instruction or bundle
505 /// that has MI as its head.
506 explicit MIBundleBuilder(MachineInstr *MI)
507 : MBB(*MI->getParent()), Begin(MI),
508 End(getBundleEnd(MI->getIterator())) {}
509
510 /// Return a reference to the basic block containing this bundle.
511 MachineBasicBlock &getMBB() const { return MBB; }
512
513 /// Return true if no instructions have been inserted in this bundle yet.
514 /// Empty bundles aren't representable in a MachineBasicBlock.
515 bool empty() const { return Begin == End; }
516
517 /// Return an iterator to the first bundled instruction.
518 MachineBasicBlock::instr_iterator begin() const { return Begin; }
519
520 /// Return an iterator beyond the last bundled instruction.
521 MachineBasicBlock::instr_iterator end() const { return End; }
522
523 /// Insert MI into this bundle before I which must point to an instruction in
524 /// the bundle, or end().
525 MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
526 MachineInstr *MI) {
527 MBB.insert(I, MI);
528 if (I == Begin) {
529 if (!empty())
530 MI->bundleWithSucc();
531 Begin = MI->getIterator();
532 return *this;
533 }
534 if (I == End) {
535 MI->bundleWithPred();
536 return *this;
537 }
538 // MI was inserted in the middle of the bundle, so its neighbors' flags are
539 // already fine. Update MI's bundle flags manually.
540 MI->setFlag(MachineInstr::BundledPred);
541 MI->setFlag(MachineInstr::BundledSucc);
542 return *this;
543 }
544
545 /// Insert MI into MBB by prepending it to the instructions in the bundle.
546 /// MI will become the first instruction in the bundle.
547 MIBundleBuilder &prepend(MachineInstr *MI) {
548 return insert(begin(), MI);
549 }
550
551 /// Insert MI into MBB by appending it to the instructions in the bundle.
552 /// MI will become the last instruction in the bundle.
553 MIBundleBuilder &append(MachineInstr *MI) {
554 return insert(end(), MI);
555 }
556};
557
558} // end namespace llvm
559
560#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H