Bug Summary

File: build/source/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
Warning: line 286, column 12
Assigned value is garbage or undefined
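
The path below boils down to this: evaluateMemOperandTarget() declares DispValue with no initial value and passes it to evaluateAArch64MemoryOperand(), which on the PC-relative expression path reports success by filling *DispExpr and returns without ever writing DispImm; the caller's only guard before using DispValue is the null check on DispExpr, so if the analyzer assumes the stored expression pointer is null, the uninitialized value is copied into Target at line 286. A minimal standalone sketch of that flow follows; all names (Expr, Operand, evaluateOperand, evaluateTarget) are hypothetical stand-ins rather than BOLT code, and the fix mentioned in the trailing comment is only one possible remedy, not the upstream patch.

// Hypothetical reduction of the flagged pattern (not BOLT code).
#include <cstdint>
#include <cstdio>

struct Expr {};                  // stands in for llvm::MCExpr

struct Operand {                 // stands in for an MCOperand
  bool IsImm;
  int64_t Imm;
  const Expr *E;                 // may be null, mirroring the analyzer's model of getExpr()
};

// Mirrors evaluateAArch64MemoryOperand(): the expression path returns true
// without writing DispImm.
static bool evaluateOperand(const Operand &Op, int64_t &DispImm,
                            const Expr **DispExpr) {
  if (!Op.IsImm) {
    *DispExpr = Op.E;            // DispImm is left untouched on this path
    return true;
  }
  DispImm = Op.Imm * 4;          // only the immediate path writes the output
  return true;
}

// Mirrors evaluateMemOperandTarget(): the only guard before reading DispValue
// is the null check on DispExpr.
static bool evaluateTarget(const Operand &Op, uint64_t &Target,
                           uint64_t Address) {
  int64_t DispValue;             // "declared without an initial value" (event 17)
  const Expr *DispExpr = nullptr;
  if (!evaluateOperand(Op, DispValue, &DispExpr))
    return false;
  if (DispExpr)                  // events 37-38: analyzer assumes this stays null
    return false;
  Target = DispValue + Address;  // event 39: garbage read if the expression path
  return true;                   // ran with a null Expr*
}

// One possible fix (an assumption, not the upstream patch): initialize the
// local, e.g. `int64_t DispValue = 0;`, or make every `return true` path in
// the callee store to DispImm.

int main() {
  uint64_t Target = 0;
  // Immediate path: well defined.
  if (evaluateTarget(Operand{true, 42, nullptr}, Target, 0x1000))
    std::printf("Target = 0x%llx\n", (unsigned long long)Target);
  // An expression operand whose Expr* is null would reach the flagged read.
  return 0;
}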

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64MCPlusBuilder.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-17/lib/clang/17 -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/bolt/lib/Target/AArch64 -I /build/source/bolt/lib/Target/AArch64 -I include -I /build/source/llvm/include -I /build/source/bolt/include -I tools/bolt/include -I /build/source/llvm/lib/Target/AArch64 -I lib/Target/AArch64 -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1683717183 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp

/build/source/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp

1//===- bolt/Target/AArch64/AArch64MCPlusBuilder.cpp -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides AArch64-specific MCPlus builder.
10//
11//===----------------------------------------------------------------------===//
12
13#include "MCTargetDesc/AArch64AddressingModes.h"
14#include "MCTargetDesc/AArch64MCExpr.h"
15#include "MCTargetDesc/AArch64MCTargetDesc.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "bolt/Core/MCPlusBuilder.h"
18#include "llvm/BinaryFormat/ELF.h"
19#include "llvm/MC/MCInstrInfo.h"
20#include "llvm/MC/MCRegisterInfo.h"
21#include "llvm/Support/Debug.h"
22#include "llvm/Support/ErrorHandling.h"
23
24#define DEBUG_TYPE"mcplus" "mcplus"
25
26using namespace llvm;
27using namespace bolt;
28
29namespace {
30
31class AArch64MCPlusBuilder : public MCPlusBuilder {
32public:
33 AArch64MCPlusBuilder(const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
34 const MCRegisterInfo *RegInfo)
35 : MCPlusBuilder(Analysis, Info, RegInfo) {}
36
37 bool equals(const MCTargetExpr &A, const MCTargetExpr &B,
38 CompFuncTy Comp) const override {
39 const auto &AArch64ExprA = cast<AArch64MCExpr>(A);
40 const auto &AArch64ExprB = cast<AArch64MCExpr>(B);
41 if (AArch64ExprA.getKind() != AArch64ExprB.getKind())
42 return false;
43
44 return MCPlusBuilder::equals(*AArch64ExprA.getSubExpr(),
45 *AArch64ExprB.getSubExpr(), Comp);
46 }
47
48 bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
49 return false;
50 }
51
52 bool shortenInstruction(MCInst &, const MCSubtargetInfo &) const override {
53 return false;
54 }
55
56 bool isADRP(const MCInst &Inst) const override {
57 return Inst.getOpcode() == AArch64::ADRP;
58 }
59
60 bool isADR(const MCInst &Inst) const override {
61 return Inst.getOpcode() == AArch64::ADR;
62 }
63
64 bool isAddXri(const MCInst &Inst) const {
65 return Inst.getOpcode() == AArch64::ADDXri;
66 }
67
68 void getADRReg(const MCInst &Inst, MCPhysReg &RegName) const override {
69 assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");
70 assert(MCPlus::getNumPrimeOperands(Inst) != 0 &&
71 "No operands for ADR instruction");
72 assert(Inst.getOperand(0).isReg() &&
73 "Unexpected operand in ADR instruction");
74 RegName = Inst.getOperand(0).getReg();
75 }
76
77 bool isTB(const MCInst &Inst) const {
78 return (Inst.getOpcode() == AArch64::TBNZW ||
79 Inst.getOpcode() == AArch64::TBNZX ||
80 Inst.getOpcode() == AArch64::TBZW ||
81 Inst.getOpcode() == AArch64::TBZX);
82 }
83
84 bool isCB(const MCInst &Inst) const {
85 return (Inst.getOpcode() == AArch64::CBNZW ||
86 Inst.getOpcode() == AArch64::CBNZX ||
87 Inst.getOpcode() == AArch64::CBZW ||
88 Inst.getOpcode() == AArch64::CBZX);
89 }
90
91 bool isMOVW(const MCInst &Inst) const {
92 return (Inst.getOpcode() == AArch64::MOVKWi ||
93 Inst.getOpcode() == AArch64::MOVKXi ||
94 Inst.getOpcode() == AArch64::MOVNWi ||
95 Inst.getOpcode() == AArch64::MOVNXi ||
96 Inst.getOpcode() == AArch64::MOVZXi ||
97 Inst.getOpcode() == AArch64::MOVZWi);
98 }
99
100 bool isADD(const MCInst &Inst) const {
101 return (Inst.getOpcode() == AArch64::ADDSWri ||
102 Inst.getOpcode() == AArch64::ADDSWrr ||
103 Inst.getOpcode() == AArch64::ADDSWrs ||
104 Inst.getOpcode() == AArch64::ADDSWrx ||
105 Inst.getOpcode() == AArch64::ADDSXri ||
106 Inst.getOpcode() == AArch64::ADDSXrr ||
107 Inst.getOpcode() == AArch64::ADDSXrs ||
108 Inst.getOpcode() == AArch64::ADDSXrx ||
109 Inst.getOpcode() == AArch64::ADDSXrx64 ||
110 Inst.getOpcode() == AArch64::ADDWri ||
111 Inst.getOpcode() == AArch64::ADDWrr ||
112 Inst.getOpcode() == AArch64::ADDWrs ||
113 Inst.getOpcode() == AArch64::ADDWrx ||
114 Inst.getOpcode() == AArch64::ADDXri ||
115 Inst.getOpcode() == AArch64::ADDXrr ||
116 Inst.getOpcode() == AArch64::ADDXrs ||
117 Inst.getOpcode() == AArch64::ADDXrx ||
118 Inst.getOpcode() == AArch64::ADDXrx64);
119 }
120
121 bool isLDRB(const MCInst &Inst) const {
122 return (Inst.getOpcode() == AArch64::LDRBBpost ||
123 Inst.getOpcode() == AArch64::LDRBBpre ||
124 Inst.getOpcode() == AArch64::LDRBBroW ||
125 Inst.getOpcode() == AArch64::LDRBBroX ||
126 Inst.getOpcode() == AArch64::LDRBBui ||
127 Inst.getOpcode() == AArch64::LDRSBWpost ||
128 Inst.getOpcode() == AArch64::LDRSBWpre ||
129 Inst.getOpcode() == AArch64::LDRSBWroW ||
130 Inst.getOpcode() == AArch64::LDRSBWroX ||
131 Inst.getOpcode() == AArch64::LDRSBWui ||
132 Inst.getOpcode() == AArch64::LDRSBXpost ||
133 Inst.getOpcode() == AArch64::LDRSBXpre ||
134 Inst.getOpcode() == AArch64::LDRSBXroW ||
135 Inst.getOpcode() == AArch64::LDRSBXroX ||
136 Inst.getOpcode() == AArch64::LDRSBXui);
137 }
138
139 bool isLDRH(const MCInst &Inst) const {
140 return (Inst.getOpcode() == AArch64::LDRHHpost ||
141 Inst.getOpcode() == AArch64::LDRHHpre ||
142 Inst.getOpcode() == AArch64::LDRHHroW ||
143 Inst.getOpcode() == AArch64::LDRHHroX ||
144 Inst.getOpcode() == AArch64::LDRHHui ||
145 Inst.getOpcode() == AArch64::LDRSHWpost ||
146 Inst.getOpcode() == AArch64::LDRSHWpre ||
147 Inst.getOpcode() == AArch64::LDRSHWroW ||
148 Inst.getOpcode() == AArch64::LDRSHWroX ||
149 Inst.getOpcode() == AArch64::LDRSHWui ||
150 Inst.getOpcode() == AArch64::LDRSHXpost ||
151 Inst.getOpcode() == AArch64::LDRSHXpre ||
152 Inst.getOpcode() == AArch64::LDRSHXroW ||
153 Inst.getOpcode() == AArch64::LDRSHXroX ||
154 Inst.getOpcode() == AArch64::LDRSHXui);
155 }
156
157 bool isLDRW(const MCInst &Inst) const {
158 return (Inst.getOpcode() == AArch64::LDRWpost ||
159 Inst.getOpcode() == AArch64::LDRWpre ||
160 Inst.getOpcode() == AArch64::LDRWroW ||
161 Inst.getOpcode() == AArch64::LDRWroX ||
162 Inst.getOpcode() == AArch64::LDRWui);
163 }
164
165 bool isLDRX(const MCInst &Inst) const {
166 return (Inst.getOpcode() == AArch64::LDRXpost ||
167 Inst.getOpcode() == AArch64::LDRXpre ||
168 Inst.getOpcode() == AArch64::LDRXroW ||
169 Inst.getOpcode() == AArch64::LDRXroX ||
170 Inst.getOpcode() == AArch64::LDRXui);
171 }
172
173 bool isLoad(const MCInst &Inst) const override {
174 return isLDRB(Inst) || isLDRH(Inst) || isLDRW(Inst) || isLDRX(Inst);
175 }
176
177 bool isLoadFromStack(const MCInst &Inst) const {
178 if (!isLoad(Inst))
179 return false;
180 const MCInstrDesc &InstInfo = Info->get(Inst.getOpcode());
181 unsigned NumDefs = InstInfo.getNumDefs();
182 for (unsigned I = NumDefs, E = InstInfo.getNumOperands(); I < E; ++I) {
183 const MCOperand &Operand = Inst.getOperand(I);
184 if (!Operand.isReg())
185 continue;
186 unsigned Reg = Operand.getReg();
187 if (Reg == AArch64::SP || Reg == AArch64::WSP || Reg == AArch64::FP ||
188 Reg == AArch64::W29)
189 return true;
190 }
191 return false;
192 }
193
194 bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
195 MCPhysReg &To) const override {
196 if (Inst.getOpcode() != AArch64::ORRXrs)
197 return false;
198 if (Inst.getOperand(1).getReg() != AArch64::XZR)
199 return false;
200 if (Inst.getOperand(3).getImm() != 0)
201 return false;
202 From = Inst.getOperand(2).getReg();
203 To = Inst.getOperand(0).getReg();
204 return true;
205 }
206
207 bool isIndirectCall(const MCInst &Inst) const override {
208 return Inst.getOpcode() == AArch64::BLR;
209 }
210
211 bool hasPCRelOperand(const MCInst &Inst) const override {
212 // ADRP is blacklisted and is an exception. Even though it has a
213 // PC-relative operand, this operand is not a complete symbol reference
214 // and BOLT shouldn't try to process it in isolation.
215 if (isADRP(Inst))
216 return false;
217
218 if (isADR(Inst))
219 return true;
220
221 // Look for literal addressing mode (see C1-143 ARM DDI 0487B.a)
222 const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
223 for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I)
224 if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
225 return true;
226
227 return false;
228 }
229
230 bool evaluateADR(const MCInst &Inst, int64_t &Imm,
231 const MCExpr **DispExpr) const {
232 assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");
233
234 const MCOperand &Label = Inst.getOperand(1);
235 if (!Label.isImm()) {
236 assert(Label.isExpr() && "Unexpected ADR operand");
237 assert(DispExpr && "DispExpr must be set");
238 *DispExpr = Label.getExpr();
239 return false;
240 }
241
242 if (Inst.getOpcode() == AArch64::ADR) {
243 Imm = Label.getImm();
244 return true;
245 }
246 Imm = Label.getImm() << 12;
247 return true;
248 }
249
250 bool evaluateAArch64MemoryOperand(const MCInst &Inst, int64_t &DispImm,
251 const MCExpr **DispExpr = nullptr) const {
252 if (isADR(Inst) || isADRP(Inst))
19. Assuming the condition is false
20. Assuming the condition is false
21. Taking false branch
253 return evaluateADR(Inst, DispImm, DispExpr);
254
255 // Literal addressing mode
256 const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
257 for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
22. Assuming 'I' is not equal to 'E'
23. Loop condition is true. Entering loop body
258 if (MCII.operands()[I].OperandType != MCOI::OPERAND_PCREL)
24. Assuming field 'OperandType' is equal to OPERAND_PCREL
25. Taking false branch
259 continue;
260
261 if (!Inst.getOperand(I).isImm()) {
262 assert(Inst.getOperand(I).isExpr() && "Unexpected PCREL operand");
26. Taking true branch
27. '?' condition is true
263 assert(DispExpr && "DispExpr must be set");
28. '?' condition is true
264 *DispExpr = Inst.getOperand(I).getExpr();
29. Calling 'MCOperand::getExpr'
32. Returning from 'MCOperand::getExpr'
33. Value assigned to 'DispExpr', which participates in a condition later
265 return true;
34. Returning without writing to 'DispImm'
266 }
267
268 DispImm = Inst.getOperand(I).getImm() * 4;
269 return true;
270 }
271 return false;
272 }
273
274 bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
275 uint64_t Address,
276 uint64_t Size) const override {
277 int64_t DispValue;
17. 'DispValue' declared without an initial value
278 const MCExpr *DispExpr = nullptr;
279 if (!evaluateAArch64MemoryOperand(Inst, DispValue, &DispExpr))
18. Calling 'AArch64MCPlusBuilder::evaluateAArch64MemoryOperand'
35. Returning from 'AArch64MCPlusBuilder::evaluateAArch64MemoryOperand'
36. Taking false branch
280 return false;
281
282 // Make sure it's a well-formed addressing we can statically evaluate.
283 if (DispExpr)
37. Assuming 'DispExpr' is null
38. Taking false branch
284 return false;
285
286 Target = DispValue;
39. Assigned value is garbage or undefined
287 if (Inst.getOpcode() == AArch64::ADRP)
288 Target += Address & ~0xFFFULL;
289 else
290 Target += Address;
291 return true;
292 }
293
294 MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
295 MCInst::iterator OI = Inst.begin();
296 if (isADR(Inst) || isADRP(Inst)) {
297 assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
298 "Unexpected number of operands");
299 return ++OI;
300 }
301 const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
302 for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
303 if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
304 break;
305 ++OI;
306 }
307 assert(OI != Inst.end() && "Literal operand not found");
308 return OI;
309 }
310
311 bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
312 MCInst::iterator OI = getMemOperandDisp(Inst);
313 *OI = Operand;
314 return true;
315 }
316
317 const MCExpr *getTargetExprFor(MCInst &Inst, const MCExpr *Expr,
318 MCContext &Ctx,
319 uint64_t RelType) const override {
320
321 if (isADR(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_LO21 ||
322 RelType == ELF::R_AARCH64_TLSDESC_ADR_PREL21) {
323 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, Ctx);
324 } else if (isADRP(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21 ||
325 RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21_NC ||
326 RelType == ELF::R_AARCH64_TLSDESC_ADR_PAGE21 ||
327 RelType == ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
328 RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
329 // Never emit a GOT reloc, we handled this in
330 // RewriteInstance::readRelocations().
331 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, Ctx);
332 } else {
333 switch (RelType) {
334 case ELF::R_AARCH64_ADD_ABS_LO12_NC:
335 case ELF::R_AARCH64_LD64_GOT_LO12_NC:
336 case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
337 case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
338 case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
339 case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
340 case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
341 case ELF::R_AARCH64_TLSDESC_ADD_LO12:
342 case ELF::R_AARCH64_TLSDESC_LD64_LO12:
343 case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
344 case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
345 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_LO12, Ctx);
346 case ELF::R_AARCH64_MOVW_UABS_G3:
347 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G3, Ctx);
348 case ELF::R_AARCH64_MOVW_UABS_G2:
349 case ELF::R_AARCH64_MOVW_UABS_G2_NC:
350 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G2_NC, Ctx);
351 case ELF::R_AARCH64_MOVW_UABS_G1:
352 case ELF::R_AARCH64_MOVW_UABS_G1_NC:
353 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G1_NC, Ctx);
354 case ELF::R_AARCH64_MOVW_UABS_G0:
355 case ELF::R_AARCH64_MOVW_UABS_G0_NC:
356 return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G0_NC, Ctx);
357 default:
358 break;
359 }
360 }
361 return Expr;
362 }
363
364 bool getSymbolRefOperandNum(const MCInst &Inst, unsigned &OpNum) const {
365 if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
366 return false;
367
368 // Auto-select correct operand number
369 if (OpNum == 0) {
370 if (isConditionalBranch(Inst) || isADR(Inst) || isADRP(Inst) ||
371 isMOVW(Inst))
372 OpNum = 1;
373 if (isTB(Inst) || isAddXri(Inst))
374 OpNum = 2;
375 }
376
377 return true;
378 }
379
380 const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
381 auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
382 if (AArchExpr && AArchExpr->getSubExpr())
383 return getTargetSymbol(AArchExpr->getSubExpr());
384
385 auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
386 if (BinExpr)
387 return getTargetSymbol(BinExpr->getLHS());
388
389 auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Expr);
390 if (SymExpr && SymExpr->getKind() == MCSymbolRefExpr::VK_None)
391 return &SymExpr->getSymbol();
392
393 return nullptr;
394 }
395
396 const MCSymbol *getTargetSymbol(const MCInst &Inst,
397 unsigned OpNum = 0) const override {
398 if (!getSymbolRefOperandNum(Inst, OpNum))
399 return nullptr;
400
401 const MCOperand &Op = Inst.getOperand(OpNum);
402 if (!Op.isExpr())
403 return nullptr;
404
405 return getTargetSymbol(Op.getExpr());
406 }
407
408 int64_t getTargetAddend(const MCExpr *Expr) const override {
409 auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
410 if (AArchExpr && AArchExpr->getSubExpr())
411 return getTargetAddend(AArchExpr->getSubExpr());
412
413 auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
414 if (BinExpr && BinExpr->getOpcode() == MCBinaryExpr::Add)
415 return getTargetAddend(BinExpr->getRHS());
416
417 auto *ConstExpr = dyn_cast<MCConstantExpr>(Expr);
418 if (ConstExpr)
419 return ConstExpr->getValue();
420
421 return 0;
422 }
423
424 int64_t getTargetAddend(const MCInst &Inst,
425 unsigned OpNum = 0) const override {
426 if (!getSymbolRefOperandNum(Inst, OpNum))
427 return 0;
428
429 const MCOperand &Op = Inst.getOperand(OpNum);
430 if (!Op.isExpr())
431 return 0;
432
433 return getTargetAddend(Op.getExpr());
434 }
435
436 bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
437 MCContext *Ctx) const override {
438 assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
439 "Invalid instruction");
440 assert(MCPlus::getNumPrimeOperands(Inst) >= 1 &&
441 "Invalid number of operands");
442 MCInst::iterator OI = Inst.begin();
443
444 if (isConditionalBranch(Inst)) {
445 assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
446 "Invalid number of operands");
447 ++OI;
448 }
449
450 if (isTB(Inst)) {
451 assert(MCPlus::getNumPrimeOperands(Inst) >= 3 &&
452 "Invalid number of operands");
453 OI = Inst.begin() + 2;
454 }
455
456 *OI = MCOperand::createExpr(
457 MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
458 return true;
459 }
460
461 /// Matches indirect branch patterns in AArch64 related to a jump table (JT),
462 /// helping us to build the complete CFG. A typical indirect branch to
463 /// a jump table entry in AArch64 looks like the following:
464 ///
465 /// adrp x1, #-7585792 # Get JT Page location
466 /// add x1, x1, #692 # Complement with JT Page offset
467 /// ldrh w0, [x1, w0, uxtw #1] # Loads JT entry
468 /// adr x1, #12 # Get PC + 12 (end of this BB) used next
469 /// add x0, x1, w0, sxth #2 # Finish building branch target
470 /// # (entries in JT are relative to the end
471 /// # of this BB)
472 /// br x0 # Indirect jump instruction
473 ///
474 bool analyzeIndirectBranchFragment(
475 const MCInst &Inst,
476 DenseMap<const MCInst *, SmallVector<MCInst *, 4>> &UDChain,
477 const MCExpr *&JumpTable, int64_t &Offset, int64_t &ScaleValue,
478 MCInst *&PCRelBase) const {
479 // Expect AArch64 BR
480 assert(Inst.getOpcode() == AArch64::BR && "Unexpected opcode");
481
482 // Match the indirect branch pattern for aarch64
483 SmallVector<MCInst *, 4> &UsesRoot = UDChain[&Inst];
484 if (UsesRoot.size() == 0 || UsesRoot[0] == nullptr)
485 return false;
486
487 const MCInst *DefAdd = UsesRoot[0];
488
489 // Now we match an ADD
490 if (!isADD(*DefAdd)) {
491 // If the address is not broken up in two parts, this is not branching
492 // according to a jump table entry. Fail.
493 return false;
494 }
495 if (DefAdd->getOpcode() == AArch64::ADDXri) {
496 // This can happen when there is no offset, but a direct jump that was
497 // transformed into an indirect one (indirect tail call) :
498 // ADRP x2, Perl_re_compiler
499 // ADD x2, x2, :lo12:Perl_re_compiler
500 // BR x2
501 return false;
502 }
503 if (DefAdd->getOpcode() == AArch64::ADDXrs) {
504 // Covers the less common pattern where JT entries are relative to
505 // the JT itself (like x86). Seems less efficient since we can't
506 // assume the JT is aligned at 4B boundary and thus drop 2 bits from
507 // JT values.
508 // cde264:
509 // adrp x12, #21544960 ; 216a000
510 // add x12, x12, #1696 ; 216a6a0 (JT object in .rodata)
511 // ldrsw x8, [x12, x8, lsl #2] --> loads e.g. 0xfeb73bd8
512 // * add x8, x8, x12 --> = cde278, next block
513 // br x8
514 // cde278:
515 //
516 // Parsed as ADDXrs reg:x8 reg:x8 reg:x12 imm:0
517 return false;
518 }
519 assert(DefAdd->getOpcode() == AArch64::ADDXrx &&
520 "Failed to match indirect branch!");
521
522 // Validate ADD operands
523 int64_t OperandExtension = DefAdd->getOperand(3).getImm();
524 unsigned ShiftVal = AArch64_AM::getArithShiftValue(OperandExtension);
525 AArch64_AM::ShiftExtendType ExtendType =
526 AArch64_AM::getArithExtendType(OperandExtension);
527 if (ShiftVal != 2)
528 llvm_unreachable("Failed to match indirect branch! (fragment 2)")::llvm::llvm_unreachable_internal("Failed to match indirect branch! (fragment 2)"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 528)
;
529
530 if (ExtendType == AArch64_AM::SXTB)
531 ScaleValue = 1LL;
532 else if (ExtendType == AArch64_AM::SXTH)
533 ScaleValue = 2LL;
534 else if (ExtendType == AArch64_AM::SXTW)
535 ScaleValue = 4LL;
536 else
537 llvm_unreachable("Failed to match indirect branch! (fragment 3)")::llvm::llvm_unreachable_internal("Failed to match indirect branch! (fragment 3)"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 537)
;
538
539 // Match an ADR to load base address to be used when addressing JT targets
540 SmallVector<MCInst *, 4> &UsesAdd = UDChain[DefAdd];
541 if (UsesAdd.size() <= 1 || UsesAdd[1] == nullptr || UsesAdd[2] == nullptr) {
542 // This happens when we don't have enough context about this jump table
543 // because the jumping code sequence was split in multiple basic blocks.
544 // This was observed in the wild in HHVM code (dispatchImpl).
545 return false;
546 }
547 MCInst *DefBaseAddr = UsesAdd[1];
548 assert(DefBaseAddr->getOpcode() == AArch64::ADR &&
549 "Failed to match indirect branch pattern! (fragment 3)");
550
551 PCRelBase = DefBaseAddr;
552 // Match LOAD to load the jump table (relative) target
553 const MCInst *DefLoad = UsesAdd[2];
554 assert(isLoad(*DefLoad) &&
555 "Failed to match indirect branch load pattern! (1)");
556 assert((ScaleValue != 1LL || isLDRB(*DefLoad)) &&
557 "Failed to match indirect branch load pattern! (2)");
558 assert((ScaleValue != 2LL || isLDRH(*DefLoad)) &&
559 "Failed to match indirect branch load pattern! (3)");
560
561 // Match ADD that calculates the JumpTable Base Address (not the offset)
562 SmallVector<MCInst *, 4> &UsesLoad = UDChain[DefLoad];
563 const MCInst *DefJTBaseAdd = UsesLoad[1];
564 MCPhysReg From, To;
565 if (DefJTBaseAdd == nullptr || isLoadFromStack(*DefJTBaseAdd) ||
566 isRegToRegMove(*DefJTBaseAdd, From, To)) {
567 // Sometimes base address may have been defined in another basic block
568 // (hoisted). Return with no jump table info.
569 JumpTable = nullptr;
570 return true;
571 }
572
573 assert(DefJTBaseAdd->getOpcode() == AArch64::ADDXri &&
574 "Failed to match jump table base address pattern! (1)");
575
576 if (DefJTBaseAdd->getOperand(2).isImm())
577 Offset = DefJTBaseAdd->getOperand(2).getImm();
578 SmallVector<MCInst *, 4> &UsesJTBaseAdd = UDChain[DefJTBaseAdd];
579 const MCInst *DefJTBasePage = UsesJTBaseAdd[1];
580 if (DefJTBasePage == nullptr || isLoadFromStack(*DefJTBasePage)) {
581 JumpTable = nullptr;
582 return true;
583 }
584 assert(DefJTBasePage->getOpcode() == AArch64::ADRP &&
585 "Failed to match jump table base page pattern! (2)");
586 if (DefJTBasePage->getOperand(1).isExpr())
587 JumpTable = DefJTBasePage->getOperand(1).getExpr();
588 return true;
589 }
590
591 DenseMap<const MCInst *, SmallVector<MCInst *, 4>>
592 computeLocalUDChain(const MCInst *CurInstr, InstructionIterator Begin,
593 InstructionIterator End) const {
594 DenseMap<int, MCInst *> RegAliasTable;
595 DenseMap<const MCInst *, SmallVector<MCInst *, 4>> Uses;
596
597 auto addInstrOperands = [&](const MCInst &Instr) {
598 // Update Uses table
599 for (const MCOperand &Operand : MCPlus::primeOperands(Instr)) {
600 if (!Operand.isReg())
601 continue;
602 unsigned Reg = Operand.getReg();
603 MCInst *AliasInst = RegAliasTable[Reg];
604 Uses[&Instr].push_back(AliasInst);
605 LLVM_DEBUG({
606 dbgs() << "Adding reg operand " << Reg << " refs ";
607 if (AliasInst != nullptr)
608 AliasInst->dump();
609 else
610 dbgs() << "\n";
611 });
612 }
613 };
614
615 LLVM_DEBUG(dbgs() << "computeLocalUDChain\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("mcplus")) { dbgs() << "computeLocalUDChain\n"; } } while
(false)
;
616 bool TerminatorSeen = false;
617 for (auto II = Begin; II != End; ++II) {
618 MCInst &Instr = *II;
619 // Ignore nops and CFIs
620 if (isPseudo(Instr) || isNoop(Instr))
621 continue;
622 if (TerminatorSeen) {
623 RegAliasTable.clear();
624 Uses.clear();
625 }
626
627 LLVM_DEBUG(dbgs() << "Now updating for:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("mcplus")) { dbgs() << "Now updating for:\n "; } } while
(false)
;
628 LLVM_DEBUG(Instr.dump());
629 addInstrOperands(Instr);
630
631 BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
632 getWrittenRegs(Instr, Regs);
633
634 // Update register definitions after this point
635 for (int Idx : Regs.set_bits()) {
636 RegAliasTable[Idx] = &Instr;
637 LLVM_DEBUG(dbgs() << "Setting reg " << Idxdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("mcplus")) { dbgs() << "Setting reg " << Idx <<
" def to current instr.\n"; } } while (false)
638 << " def to current instr.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("mcplus")) { dbgs() << "Setting reg " << Idx <<
" def to current instr.\n"; } } while (false)
;
639 }
640
641 TerminatorSeen = isTerminator(Instr);
642 }
643
644 // Process the last instruction, which is not currently added into the
645 // instruction stream
646 if (CurInstr)
647 addInstrOperands(*CurInstr);
648
649 return Uses;
650 }
651
652 IndirectBranchType analyzeIndirectBranch(
653 MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
654 const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
655 unsigned &IndexRegNumOut, int64_t &DispValueOut,
656 const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
657 MemLocInstrOut = nullptr;
658 BaseRegNumOut = AArch64::NoRegister;
659 IndexRegNumOut = AArch64::NoRegister;
660 DispValueOut = 0;
661 DispExprOut = nullptr;
662
663 // An instruction referencing memory used by jump instruction (directly or
664 // via register). This location could be an array of function pointers
665 // in case of indirect tail call, or a jump table.
666 MCInst *MemLocInstr = nullptr;
667
668 // Analyze the memory location.
669 int64_t ScaleValue, DispValue;
670 const MCExpr *DispExpr;
671
672 DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
673 computeLocalUDChain(&Instruction, Begin, End);
674 MCInst *PCRelBase;
675 if (!analyzeIndirectBranchFragment(Instruction, UDChain, DispExpr,
676 DispValue, ScaleValue, PCRelBase))
677 return IndirectBranchType::UNKNOWN;
678
679 MemLocInstrOut = MemLocInstr;
680 DispValueOut = DispValue;
681 DispExprOut = DispExpr;
682 PCRelBaseOut = PCRelBase;
683 return IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE;
684 }
685
686 /// Matches PLT entry pattern and returns the associated GOT entry address.
687 /// Typical PLT entry looks like the following:
688 ///
689 /// adrp x16, 230000
690 /// ldr x17, [x16, #3040]
691 /// add x16, x16, #0xbe0
692 /// br x17
693 ///
694 uint64_t analyzePLTEntry(MCInst &Instruction, InstructionIterator Begin,
695 InstructionIterator End,
696 uint64_t BeginPC) const override {
697 // Check branch instruction
698 MCInst *Branch = &Instruction;
699 assert(Branch->getOpcode() == AArch64::BR && "Unexpected opcode");
1. Assuming the condition is true
2. '?' condition is true
700
701 DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
702 computeLocalUDChain(Branch, Begin, End);
703
704 // Match ldr instruction
705 SmallVector<MCInst *, 4> &BranchUses = UDChain[Branch];
706 if (BranchUses.size() < 1 || BranchUses[0] == nullptr)
3. Assuming the condition is false
4. Assuming the condition is false
5. Taking false branch
707 return 0;
708
709 // Check ldr instruction
710 const MCInst *Ldr = BranchUses[0];
711 if (Ldr->getOpcode() != AArch64::LDRXui)
6. Assuming the condition is false
7. Taking false branch
712 return 0;
713
714 // Get ldr value
715 const unsigned ScaleLdr = 8; // LDRX operates on 8 bytes segments
716 assert(Ldr->getOperand(2).isImm() && "Unexpected ldr operand");
8. '?' condition is true
717 const uint64_t Offset = Ldr->getOperand(2).getImm() * ScaleLdr;
718
719 // Match adrp instruction
720 SmallVector<MCInst *, 4> &LdrUses = UDChain[Ldr];
721 if (LdrUses.size() < 2 || LdrUses[1] == nullptr)
9. Assuming the condition is false
10. Assuming the condition is false
11. Taking false branch
722 return 0;
723
724 // Check adrp instruction
725 MCInst *Adrp = LdrUses[1];
726 if (Adrp->getOpcode() != AArch64::ADRP)
12. Assuming the condition is false
13. Taking false branch
727 return 0;
728
729 // Get adrp instruction PC
730 const unsigned InstSize = 4;
731 uint64_t AdrpPC = BeginPC;
732 for (InstructionIterator It = Begin; It != End; ++It) {
14. Loop condition is false. Execution continues on line 739
733 if (&(*It) == Adrp)
734 break;
735 AdrpPC += InstSize;
736 }
737
738 // Get adrp value
739 uint64_t Base;
740 assert(Adrp->getOperand(1).isImm() && "Unexpected adrp operand");
15. '?' condition is true
741 bool Ret = evaluateMemOperandTarget(*Adrp, Base, AdrpPC, InstSize);
16. Calling 'AArch64MCPlusBuilder::evaluateMemOperandTarget'
742 assert(Ret && "Failed to evaluate adrp");
743 (void)Ret;
744
745 return Base + Offset;
746 }
747
748 unsigned getInvertedBranchOpcode(unsigned Opcode) const {
749 switch (Opcode) {
750 default:
751 llvm_unreachable("Failed to invert branch opcode")::llvm::llvm_unreachable_internal("Failed to invert branch opcode"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 751)
;
752 return Opcode;
753 case AArch64::TBZW: return AArch64::TBNZW;
754 case AArch64::TBZX: return AArch64::TBNZX;
755 case AArch64::TBNZW: return AArch64::TBZW;
756 case AArch64::TBNZX: return AArch64::TBZX;
757 case AArch64::CBZW: return AArch64::CBNZW;
758 case AArch64::CBZX: return AArch64::CBNZX;
759 case AArch64::CBNZW: return AArch64::CBZW;
760 case AArch64::CBNZX: return AArch64::CBZX;
761 }
762 }
763
764 unsigned getCondCode(const MCInst &Inst) const override {
765 // AArch64 does not use conditional codes, so we just return the opcode
766 // of the conditional branch here.
767 return Inst.getOpcode();
768 }
769
770 unsigned getCanonicalBranchCondCode(unsigned Opcode) const override {
771 switch (Opcode) {
772 default:
773 return Opcode;
774 case AArch64::TBNZW: return AArch64::TBZW;
775 case AArch64::TBNZX: return AArch64::TBZX;
776 case AArch64::CBNZW: return AArch64::CBZW;
777 case AArch64::CBNZX: return AArch64::CBZX;
778 }
779 }
780
781 bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
782 MCContext *Ctx) const override {
783 if (isTB(Inst) || isCB(Inst)) {
784 Inst.setOpcode(getInvertedBranchOpcode(Inst.getOpcode()));
785 assert(Inst.getOpcode() != 0 && "Invalid branch instruction");
786 } else if (Inst.getOpcode() == AArch64::Bcc) {
787 Inst.getOperand(0).setImm(AArch64CC::getInvertedCondCode(
788 static_cast<AArch64CC::CondCode>(Inst.getOperand(0).getImm())));
789 assert(Inst.getOperand(0).getImm() != AArch64CC::AL &&
790 Inst.getOperand(0).getImm() != AArch64CC::NV &&
791 "Can't reverse ALWAYS cond code");
792 } else {
793 LLVM_DEBUG(Inst.dump());
794 llvm_unreachable("Unrecognized branch instruction")::llvm::llvm_unreachable_internal("Unrecognized branch instruction"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 794)
;
795 }
796 return replaceBranchTarget(Inst, TBB, Ctx);
797 }
798
799 int getPCRelEncodingSize(const MCInst &Inst) const override {
800 switch (Inst.getOpcode()) {
801 default:
802 llvm_unreachable("Failed to get pcrel encoding size")::llvm::llvm_unreachable_internal("Failed to get pcrel encoding size"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 802)
;
803 return 0;
804 case AArch64::TBZW: return 16;
805 case AArch64::TBZX: return 16;
806 case AArch64::TBNZW: return 16;
807 case AArch64::TBNZX: return 16;
808 case AArch64::CBZW: return 21;
809 case AArch64::CBZX: return 21;
810 case AArch64::CBNZW: return 21;
811 case AArch64::CBNZX: return 21;
812 case AArch64::B: return 28;
813 case AArch64::BL: return 28;
814 case AArch64::Bcc: return 21;
815 }
816 }
817
818 int getShortJmpEncodingSize() const override { return 33; }
819
820 int getUncondBranchEncodingSize() const override { return 28; }
821
822 bool createTailCall(MCInst &Inst, const MCSymbol *Target,
823 MCContext *Ctx) override {
824 Inst.setOpcode(AArch64::B);
825 Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
826 Inst, MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
827 *Ctx, 0)));
828 setTailCall(Inst);
829 return true;
830 }
831
832 void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
833 MCContext *Ctx) override {
834 createShortJmp(Seq, Target, Ctx, /*IsTailCall*/ true);
835 }
836
837 bool createTrap(MCInst &Inst) const override {
838 Inst.clear();
839 Inst.setOpcode(AArch64::BRK);
840 Inst.addOperand(MCOperand::createImm(1));
841 return true;
842 }
843
844 bool convertJmpToTailCall(MCInst &Inst) override {
845 setTailCall(Inst);
846 return true;
847 }
848
849 bool convertTailCallToJmp(MCInst &Inst) override {
850 removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
851 clearOffset(Inst);
852 if (getConditionalTailCall(Inst))
853 unsetConditionalTailCall(Inst);
854 return true;
855 }
856
857 bool lowerTailCall(MCInst &Inst) override {
858 removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
859 if (getConditionalTailCall(Inst))
860 unsetConditionalTailCall(Inst);
861 return true;
862 }
863
864 bool isNoop(const MCInst &Inst) const override {
865 return Inst.getOpcode() == AArch64::HINT &&
866 Inst.getOperand(0).getImm() == 0;
867 }
868
869 bool createNoop(MCInst &Inst) const override {
870 Inst.setOpcode(AArch64::HINT);
871 Inst.clear();
872 Inst.addOperand(MCOperand::createImm(0));
873 return true;
874 }
875
876 bool isStore(const MCInst &Inst) const override { return false; }
877
878 bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
879 const MCSymbol *&TBB, const MCSymbol *&FBB,
880 MCInst *&CondBranch,
881 MCInst *&UncondBranch) const override {
882 auto I = End;
883
884 while (I != Begin) {
885 --I;
886
887 // Ignore nops and CFIs
888 if (isPseudo(*I) || isNoop(*I))
889 continue;
890
891 // Stop when we find the first non-terminator
892 if (!isTerminator(*I) || isTailCall(*I) || !isBranch(*I))
893 break;
894
895 // Handle unconditional branches.
896 if (isUnconditionalBranch(*I)) {
897 // If any code was seen after this unconditional branch, we've seen
898 // unreachable code. Ignore them.
899 CondBranch = nullptr;
900 UncondBranch = &*I;
901 const MCSymbol *Sym = getTargetSymbol(*I);
902 assert(Sym != nullptr &&
903 "Couldn't extract BB symbol from jump operand");
904 TBB = Sym;
905 continue;
906 }
907
908 // Handle conditional branches and ignore indirect branches
909 if (isIndirectBranch(*I))
910 return false;
911
912 if (CondBranch == nullptr) {
913 const MCSymbol *TargetBB = getTargetSymbol(*I);
914 if (TargetBB == nullptr) {
915 // Unrecognized branch target
916 return false;
917 }
918 FBB = TBB;
919 TBB = TargetBB;
920 CondBranch = &*I;
921 continue;
922 }
923
924 llvm_unreachable("multiple conditional branches in one BB")::llvm::llvm_unreachable_internal("multiple conditional branches in one BB"
, "bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp", 924)
;
925 }
926 return true;
927 }
928
929 void createLongJmp(InstructionListType &Seq, const MCSymbol *Target,
930 MCContext *Ctx, bool IsTailCall) override {
931 // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of "Procedure Call
932 // Standard for the ARM 64-bit Architecture (AArch64)".
933 // The sequence of instructions we create here is the following:
934 // movz ip0, #:abs_g3:<addr>
935 // movk ip0, #:abs_g2_nc:<addr>
936 // movk ip0, #:abs_g1_nc:<addr>
937 // movk ip0, #:abs_g0_nc:<addr>
938 // br ip0
939 MCInst Inst;
940 Inst.setOpcode(AArch64::MOVZXi);
941 Inst.addOperand(MCOperand::createReg(AArch64::X16));
942 Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
943 MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
944 AArch64MCExpr::VK_ABS_G3, *Ctx)));
945 Inst.addOperand(MCOperand::createImm(0x30));
946 Seq.emplace_back(Inst);
947
948 Inst.clear();
949 Inst.setOpcode(AArch64::MOVKXi);
950 Inst.addOperand(MCOperand::createReg(AArch64::X16));
951 Inst.addOperand(MCOperand::createReg(AArch64::X16));
952 Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
953 MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
954 AArch64MCExpr::VK_ABS_G2_NC, *Ctx)));
955 Inst.addOperand(MCOperand::createImm(0x20));
956 Seq.emplace_back(Inst);
957
958 Inst.clear();
959 Inst.setOpcode(AArch64::MOVKXi);
960 Inst.addOperand(MCOperand::createReg(AArch64::X16));
961 Inst.addOperand(MCOperand::createReg(AArch64::X16));
962 Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
963 MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
964 AArch64MCExpr::VK_ABS_G1_NC, *Ctx)));
965 Inst.addOperand(MCOperand::createImm(0x10));
966 Seq.emplace_back(Inst);
967
968 Inst.clear();
969 Inst.setOpcode(AArch64::MOVKXi);
970 Inst.addOperand(MCOperand::createReg(AArch64::X16));
971 Inst.addOperand(MCOperand::createReg(AArch64::X16));
972 Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
973 MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
974 AArch64MCExpr::VK_ABS_G0_NC, *Ctx)));
975 Inst.addOperand(MCOperand::createImm(0));
976 Seq.emplace_back(Inst);
977
978 Inst.clear();
979 Inst.setOpcode(AArch64::BR);
980 Inst.addOperand(MCOperand::createReg(AArch64::X16));
981 if (IsTailCall)
982 setTailCall(Inst);
983 Seq.emplace_back(Inst);
984 }
985
986 void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
987 MCContext *Ctx, bool IsTailCall) override {
988 // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of "Procedure Call
989 // Standard for the ARM 64-bit Architecture (AArch64)".
990 // The sequence of instructions we create here is the following:
991 // adrp ip0, imm
992 // add ip0, ip0, imm
993 // br ip0
994 MCPhysReg Reg = AArch64::X16;
995 InstructionListType Insts = materializeAddress(Target, Ctx, Reg);
996 Insts.emplace_back();
997 MCInst &Inst = Insts.back();
998 Inst.clear();
999 Inst.setOpcode(AArch64::BR);
1000 Inst.addOperand(MCOperand::createReg(Reg));
1001 if (IsTailCall)
1002 setTailCall(Inst);
1003 Seq.swap(Insts);
1004 }
1005
1006 /// Matching pattern here is
1007 ///
1008 /// ADRP x16, imm
1009 /// ADD x16, x16, imm
1010 /// BR x16
1011 ///
1012 uint64_t matchLinkerVeneer(InstructionIterator Begin, InstructionIterator End,
1013 uint64_t Address, const MCInst &CurInst,
1014 MCInst *&TargetHiBits, MCInst *&TargetLowBits,
1015 uint64_t &Target) const override {
1016 if (CurInst.getOpcode() != AArch64::BR || !CurInst.getOperand(0).isReg() ||
1017 CurInst.getOperand(0).getReg() != AArch64::X16)
1018 return 0;
1019
1020 auto I = End;
1021 if (I == Begin)
1022 return 0;
1023
1024 --I;
1025 Address -= 4;
1026 if (I == Begin || I->getOpcode() != AArch64::ADDXri ||
1027 MCPlus::getNumPrimeOperands(*I) < 3 || !I->getOperand(0).isReg() ||
1028 !I->getOperand(1).isReg() ||
1029 I->getOperand(0).getReg() != AArch64::X16 ||
1030 I->getOperand(1).getReg() != AArch64::X16 || !I->getOperand(2).isImm())
1031 return 0;
1032 TargetLowBits = &*I;
1033 uint64_t Addr = I->getOperand(2).getImm() & 0xFFF;
1034
1035 --I;
1036 Address -= 4;
1037 if (I->getOpcode() != AArch64::ADRP ||
1038 MCPlus::getNumPrimeOperands(*I) < 2 || !I->getOperand(0).isReg() ||
1039 !I->getOperand(1).isImm() || I->getOperand(0).getReg() != AArch64::X16)
1040 return 0;
1041 TargetHiBits = &*I;
1042 Addr |= (Address + ((int64_t)I->getOperand(1).getImm() << 12)) &
1043 0xFFFFFFFFFFFFF000ULL;
1044 Target = Addr;
1045 return 3;
1046 }
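
To restate the arithmetic above: the veneer target is the 4 KiB page selected by the ADRP immediate, taken relative to the page containing the ADRP itself, combined with the low 12 bits carried by the ADD. A standalone sketch of only that computation follows (plain C++, no LLVM dependency; all concrete addresses are made up; the 4-byte instruction spacing is already accounted for by the Address adjustments above):

  #include <cassert>
  #include <cstdint>

  static uint64_t veneerTarget(uint64_t AdrpAddress, int64_t AdrpPageImm,
                               int64_t AddImm) {
    uint64_t Target = AddImm & 0xFFF;                        // low 12 bits from the ADD
    Target |= (AdrpAddress + (AdrpPageImm << 12)) &          // page bits from the ADRP
              0xFFFFFFFFFFFFF000ULL;
    return Target;
  }

  int main() {
    // Hypothetical veneer whose ADRP sits at 0x400000 and reaches 0x612345.
    const uint64_t AdrpAddr = 0x400000;
    const int64_t PageDelta = (0x612000 - 0x400000) >> 12;   // ADRP page immediate
    assert(veneerTarget(AdrpAddr, PageDelta, 0x345) == 0x612345);
    return 0;
  }
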
1047
1048 bool matchAdrpAddPair(const MCInst &Adrp, const MCInst &Add) const override {
1049 if (!isADRP(Adrp) || !isAddXri(Add))
1050 return false;
1051
1052    assert(Adrp.getOperand(0).isReg() &&
1053           "Unexpected operand in ADRP instruction");
1054 MCPhysReg AdrpReg = Adrp.getOperand(0).getReg();
1055    assert(Add.getOperand(1).isReg() &&
1056           "Unexpected operand in ADDXri instruction");
1057 MCPhysReg AddReg = Add.getOperand(1).getReg();
1058 return AdrpReg == AddReg;
1059 }
1060
1061 bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
1062 int64_t Addend, MCContext *Ctx, int64_t &Value,
1063 uint64_t RelType) const override {
1064 unsigned ImmOpNo = -1U;
1065 for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
1066 ++Index) {
1067 if (Inst.getOperand(Index).isImm()) {
1068 ImmOpNo = Index;
1069 break;
1070 }
1071 }
1072 if (ImmOpNo == -1U)
1073 return false;
1074
1075 Value = Inst.getOperand(ImmOpNo).getImm();
1076
1077 setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);
1078
1079 return true;
1080 }
1081
1082 bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
1083 MCContext *Ctx) const override {
1084 Inst.setOpcode(AArch64::B);
1085 Inst.clear();
1086 Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
1087 Inst, MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx),
1088 *Ctx, 0)));
1089 return true;
1090 }
1091
1092 bool shouldRecordCodeRelocation(uint64_t RelType) const override {
1093 switch (RelType) {
1094 case ELF::R_AARCH64_ABS64:
1095 case ELF::R_AARCH64_ABS32:
1096 case ELF::R_AARCH64_ABS16:
1097 case ELF::R_AARCH64_ADD_ABS_LO12_NC:
1098 case ELF::R_AARCH64_ADR_GOT_PAGE:
1099 case ELF::R_AARCH64_ADR_PREL_LO21:
1100 case ELF::R_AARCH64_ADR_PREL_PG_HI21:
1101 case ELF::R_AARCH64_ADR_PREL_PG_HI21_NC:
1102 case ELF::R_AARCH64_LD64_GOT_LO12_NC:
1103 case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
1104 case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
1105 case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
1106 case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
1107 case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
1108 case ELF::R_AARCH64_TLSDESC_ADD_LO12:
1109 case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
1110 case ELF::R_AARCH64_TLSDESC_ADR_PREL21:
1111 case ELF::R_AARCH64_TLSDESC_LD64_LO12:
1112 case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1113 case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1114 case ELF::R_AARCH64_MOVW_UABS_G0:
1115 case ELF::R_AARCH64_MOVW_UABS_G0_NC:
1116 case ELF::R_AARCH64_MOVW_UABS_G1:
1117 case ELF::R_AARCH64_MOVW_UABS_G1_NC:
1118 case ELF::R_AARCH64_MOVW_UABS_G2:
1119 case ELF::R_AARCH64_MOVW_UABS_G2_NC:
1120 case ELF::R_AARCH64_MOVW_UABS_G3:
1121 case ELF::R_AARCH64_PREL16:
1122 case ELF::R_AARCH64_PREL32:
1123 case ELF::R_AARCH64_PREL64:
1124 return true;
1125 case ELF::R_AARCH64_CALL26:
1126 case ELF::R_AARCH64_JUMP26:
1127 case ELF::R_AARCH64_TSTBR14:
1128 case ELF::R_AARCH64_CONDBR19:
1129 case ELF::R_AARCH64_TLSDESC_CALL:
1130 case ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12:
1131 case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1132 return false;
1133 default:
1134      llvm_unreachable("Unexpected AArch64 relocation type in code");
1135 }
1136 }
1137
1138 bool createReturn(MCInst &Inst) const override {
1139 Inst.setOpcode(AArch64::RET);
1140 Inst.clear();
1141 Inst.addOperand(MCOperand::createReg(AArch64::LR));
1142 return true;
1143 }
1144
1145 InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx,
1146 MCPhysReg RegName,
1147 int64_t Addend = 0) const override {
1148 // Get page-aligned address and add page offset
1149 InstructionListType Insts(2);
1150 Insts[0].setOpcode(AArch64::ADRP);
1151 Insts[0].clear();
1152 Insts[0].addOperand(MCOperand::createReg(RegName));
1153 Insts[0].addOperand(MCOperand::createImm(0));
1154 setOperandToSymbolRef(Insts[0], /* OpNum */ 1, Target, Addend, Ctx,
1155 ELF::R_AARCH64_NONE);
1156 Insts[1].setOpcode(AArch64::ADDXri);
1157 Insts[1].clear();
1158 Insts[1].addOperand(MCOperand::createReg(RegName));
1159 Insts[1].addOperand(MCOperand::createReg(RegName));
1160 Insts[1].addOperand(MCOperand::createImm(0));
1161 Insts[1].addOperand(MCOperand::createImm(0));
1162 setOperandToSymbolRef(Insts[1], /* OpNum */ 2, Target, Addend, Ctx,
1163 ELF::R_AARCH64_ADD_ABS_LO12_NC);
1164 return Insts;
1165 }
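
For reference, once the two relocations placed above are resolved, the ADRP/ADD pair computes exactly the symbol's address: the ADRP yields the 4 KiB page of the target relative to the page of the ADRP instruction, and the ADD supplies the low 12 bits. A minimal standalone sketch of that resolution (plain C++, hypothetical addresses, no LLVM dependency):

  #include <cassert>
  #include <cstdint>

  static uint64_t adrp(uint64_t PC, uint64_t Target) {
    // ADRP produces the 4 KiB page of Target, expressed relative to the page of PC.
    int64_t PageDelta =
        (int64_t)(Target & ~0xFFFULL) - (int64_t)(PC & ~0xFFFULL);
    return (PC & ~0xFFFULL) + PageDelta;
  }

  static uint64_t addLo12(uint64_t Reg, uint64_t Target) {
    return Reg + (Target & 0xFFF);    // add xN, xN, #:lo12:Target
  }

  int main() {
    const uint64_t PC = 0x210008;     // address of the ADRP instruction
    const uint64_t Target = 0x654321; // symbol address being materialized
    uint64_t Reg = adrp(PC, Target);
    Reg = addLo12(Reg, Target);
    assert(Reg == Target);
    return 0;
  }
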
1166};
1167
1168} // end anonymous namespace
1169
1170namespace llvm {
1171namespace bolt {
1172
1173MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *Analysis,
1174 const MCInstrInfo *Info,
1175 const MCRegisterInfo *RegInfo) {
1176 return new AArch64MCPlusBuilder(Analysis, Info, RegInfo);
1177}
1178
1179} // namespace bolt
1180} // namespace llvm

/build/source/llvm/include/llvm/MC/MCInst.h

1//===- llvm/MC/MCInst.h - MCInst class --------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declarations of the MCInst and MCOperand classes,
10// which are the basic representation used for low-level machine code
11// instructions.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_MC_MCINST_H
16#define LLVM_MC_MCINST_H
17
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/ADT/StringRef.h"
20#include "llvm/ADT/bit.h"
21#include "llvm/Support/SMLoc.h"
22#include <cassert>
23#include <cstddef>
24#include <cstdint>
25
26namespace llvm {
27
28class MCExpr;
29class MCInst;
30class MCInstPrinter;
31class MCRegisterInfo;
32class raw_ostream;
33
34/// Instances of this class represent operands of the MCInst class.
35/// This is a simple discriminated union.
36class MCOperand {
37 enum MachineOperandType : unsigned char {
38 kInvalid, ///< Uninitialized.
39 kRegister, ///< Register operand.
40 kImmediate, ///< Immediate operand.
41 kSFPImmediate, ///< Single-floating-point immediate operand.
42    kDFPImmediate, ///< Double-floating-point immediate operand.
43 kExpr, ///< Relocatable immediate operand.
44 kInst ///< Sub-instruction operand.
45 };
46 MachineOperandType Kind = kInvalid;
47
48 union {
49 unsigned RegVal;
50 int64_t ImmVal;
51 uint32_t SFPImmVal;
52 uint64_t FPImmVal;
53 const MCExpr *ExprVal;
54 const MCInst *InstVal;
55 };
56
57public:
58 MCOperand() : FPImmVal(0) {}
59
60 bool isValid() const { return Kind != kInvalid; }
61 bool isReg() const { return Kind == kRegister; }
62 bool isImm() const { return Kind == kImmediate; }
63 bool isSFPImm() const { return Kind == kSFPImmediate; }
64 bool isDFPImm() const { return Kind == kDFPImmediate; }
65 bool isExpr() const { return Kind == kExpr; }
66 bool isInst() const { return Kind == kInst; }
67
68 /// Returns the register number.
69 unsigned getReg() const {
70    assert(isReg() && "This is not a register operand!");
71 return RegVal;
72 }
73
74 /// Set the register number.
75 void setReg(unsigned Reg) {
76    assert(isReg() && "This is not a register operand!");
77 RegVal = Reg;
78 }
79
80 int64_t getImm() const {
81    assert(isImm() && "This is not an immediate");
82 return ImmVal;
83 }
84
85 void setImm(int64_t Val) {
86    assert(isImm() && "This is not an immediate");
87 ImmVal = Val;
88 }
89
90 uint32_t getSFPImm() const {
91    assert(isSFPImm() && "This is not an SFP immediate");
92 return SFPImmVal;
93 }
94
95 void setSFPImm(uint32_t Val) {
96    assert(isSFPImm() && "This is not an SFP immediate");
97 SFPImmVal = Val;
98 }
99
100 uint64_t getDFPImm() const {
101    assert(isDFPImm() && "This is not an FP immediate");
102 return FPImmVal;
103 }
104
105 void setDFPImm(uint64_t Val) {
106    assert(isDFPImm() && "This is not an FP immediate");
107 FPImmVal = Val;
108 }
109 void setFPImm(double Val) {
110    assert(isDFPImm() && "This is not an FP immediate");
111 FPImmVal = bit_cast<uint64_t>(Val);
112 }
113
114 const MCExpr *getExpr() const {
115    assert(isExpr() && "This is not an expression");
30: '?' condition is true
116    return ExprVal;
31: Returning pointer, which participates in a condition later
117 }
118
119 void setExpr(const MCExpr *Val) {
120    assert(isExpr() && "This is not an expression");
121 ExprVal = Val;
122 }
123
124 const MCInst *getInst() const {
125    assert(isInst() && "This is not a sub-instruction");
126 return InstVal;
127 }
128
129 void setInst(const MCInst *Val) {
130    assert(isInst() && "This is not a sub-instruction");
131 InstVal = Val;
132 }
133
134 static MCOperand createReg(unsigned Reg) {
135 MCOperand Op;
136 Op.Kind = kRegister;
137 Op.RegVal = Reg;
138 return Op;
139 }
140
141 static MCOperand createImm(int64_t Val) {
142 MCOperand Op;
143 Op.Kind = kImmediate;
144 Op.ImmVal = Val;
145 return Op;
146 }
147
148 static MCOperand createSFPImm(uint32_t Val) {
149 MCOperand Op;
150 Op.Kind = kSFPImmediate;
151 Op.SFPImmVal = Val;
152 return Op;
153 }
154
155 static MCOperand createDFPImm(uint64_t Val) {
156 MCOperand Op;
157 Op.Kind = kDFPImmediate;
158 Op.FPImmVal = Val;
159 return Op;
160 }
161
162 static MCOperand createExpr(const MCExpr *Val) {
163 MCOperand Op;
164 Op.Kind = kExpr;
165 Op.ExprVal = Val;
166 return Op;
167 }
168
169 static MCOperand createInst(const MCInst *Val) {
170 MCOperand Op;
171 Op.Kind = kInst;
172 Op.InstVal = Val;
173 return Op;
174 }
175
176 void print(raw_ostream &OS, const MCRegisterInfo *RegInfo = nullptr) const;
177 void dump() const;
178 bool isBareSymbolRef() const;
179 bool evaluateAsConstantImm(int64_t &Imm) const;
180};
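
A short usage sketch of the MCOperand API declared above; only factory functions and accessors that appear in this header are used. It assumes the LLVM development headers are on the include path (the exact compile/link flags come from llvm-config and are not shown here):

  #include "llvm/MC/MCInst.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    MCOperand None;                                   // default kind is kInvalid
    MCOperand Reg = MCOperand::createReg(/*Reg=*/16); // stores a register number only
    MCOperand Imm = MCOperand::createImm(0x30);

    assert(!None.isValid());
    assert(Reg.isReg() && Reg.getReg() == 16);
    assert(Imm.isImm() && Imm.getImm() == 0x30);

    Imm.setImm(0);            // setters require the matching kind (see the asserts above)
    assert(Imm.getImm() == 0);
    return 0;
  }
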
181
182/// Instances of this class represent a single low-level machine
183/// instruction.
184class MCInst {
185 unsigned Opcode = 0;
186  // These flags can be used to pass information from one target subcomponent
187  // to another, e.g. from the disassembler to the asm printer. The flag
188  // values are meaningful only at the target level (e.g. prefixes on x86).
189 unsigned Flags = 0;
190
191 SMLoc Loc;
192 SmallVector<MCOperand, 10> Operands;
193
194public:
195 MCInst() = default;
196
197 void setOpcode(unsigned Op) { Opcode = Op; }
198 unsigned getOpcode() const { return Opcode; }
199
200 void setFlags(unsigned F) { Flags = F; }
201 unsigned getFlags() const { return Flags; }
202
203 void setLoc(SMLoc loc) { Loc = loc; }
204 SMLoc getLoc() const { return Loc; }
205
206 const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
207 MCOperand &getOperand(unsigned i) { return Operands[i]; }
208 unsigned getNumOperands() const { return Operands.size(); }
209
210 void addOperand(const MCOperand Op) { Operands.push_back(Op); }
211
212 using iterator = SmallVectorImpl<MCOperand>::iterator;
213 using const_iterator = SmallVectorImpl<MCOperand>::const_iterator;
214
215 void clear() { Operands.clear(); }
216 void erase(iterator I) { Operands.erase(I); }
217 void erase(iterator First, iterator Last) { Operands.erase(First, Last); }
218 size_t size() const { return Operands.size(); }
219 iterator begin() { return Operands.begin(); }
220 const_iterator begin() const { return Operands.begin(); }
221 iterator end() { return Operands.end(); }
222 const_iterator end() const { return Operands.end(); }
223
224 iterator insert(iterator I, const MCOperand &Op) {
225 return Operands.insert(I, Op);
226 }
227
228 void print(raw_ostream &OS, const MCRegisterInfo *RegInfo = nullptr) const;
229 void dump() const;
230
231  /// Dump the MCInst as prettily as possible using the additional MC
232  /// structures, if given. Operands are separated by the \p Separator
233  /// string.
234 void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer = nullptr,
235 StringRef Separator = " ",
236 const MCRegisterInfo *RegInfo = nullptr) const;
237 void dump_pretty(raw_ostream &OS, StringRef Name, StringRef Separator = " ",
238 const MCRegisterInfo *RegInfo = nullptr) const;
239};
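
And a matching sketch for MCInst itself, mirroring the setOpcode/addOperand/clear pattern used by the BOLT code earlier in this report. The opcode value 0 is a placeholder rather than a real target opcode; the same LLVM-header assumption applies. Note that clear() drops only the operands, which is why the builder code above may set the opcode either before or after clearing:

  #include "llvm/MC/MCInst.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    MCInst Inst;
    Inst.setOpcode(0);                                 // placeholder opcode value
    Inst.addOperand(MCOperand::createReg(/*Reg=*/16));
    Inst.addOperand(MCOperand::createImm(42));

    assert(Inst.getNumOperands() == 2);
    assert(Inst.getOperand(1).isImm() && Inst.getOperand(1).getImm() == 42);

    Inst.clear();                                      // clears the operand list only;
    assert(Inst.size() == 0 && Inst.getOpcode() == 0); // the opcode is retained
    return 0;
  }
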
240
241inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
242 MO.print(OS);
243 return OS;
244}
245
246inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
247 MI.print(OS);
248 return OS;
249}
250
251} // end namespace llvm
252
253#endif // LLVM_MC_MCINST_H