Bug Summary

File: tools/lld/ELF/AArch64ErrataFix.cpp
Warning: line 511, column 27
Assigned value is garbage or undefined (the variable `ISLimit` is read before it is ever initialized)

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64ErrataFix.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/lld/ELF -I /build/llvm-toolchain-snapshot-8~svn345461/tools/lld/ELF -I /build/llvm-toolchain-snapshot-8~svn345461/tools/lld/include -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/lld/include -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem 
/usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/lld/ELF -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/tools/lld/ELF/AArch64ErrataFix.cpp -faddrsig
1//===- AArch64ErrataFix.cpp -----------------------------------------------===//
2//
3// The LLVM Linker
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9// This file implements Section Patching for the purpose of working around
10// errata in CPUs. The general principle is that an erratum sequence of one or
11// more instructions is detected in the instruction stream, one of the
12// instructions in the sequence is replaced with a branch to a patch sequence
13// of replacement instructions. At the end of the replacement sequence the
14// patch branches back to the instruction stream.
15
16// This technique is only suitable for fixing an erratum when:
17// - There is a set of necessary conditions required to trigger the erratum that
18// can be detected at static link time.
19// - There is a set of replacement instructions that can be used to remove at
20// least one of the necessary conditions that trigger the erratum.
21// - We can overwrite an instruction in the erratum sequence with a branch to
22// the replacement sequence.
23// - We can place the replacement sequence within range of the branch.
24
25// FIXME:
26// - The implementation here only supports one patch, the AArch64 Cortex-53
27// errata 843419 that affects r0p0, r0p1, r0p2 and r0p4 versions of the core.
28// To keep the initial version simple there is no support for multiple
29// architectures or selection of different patches.
30//===----------------------------------------------------------------------===//
31
32#include "AArch64ErrataFix.h"
33#include "Config.h"
34#include "LinkerScript.h"
35#include "OutputSections.h"
36#include "Relocations.h"
37#include "Symbols.h"
38#include "SyntheticSections.h"
39#include "Target.h"
40#include "lld/Common/Memory.h"
41#include "lld/Common/Strings.h"
42#include "llvm/Support/Endian.h"
43#include "llvm/Support/raw_ostream.h"
44#include <algorithm>
45
46using namespace llvm;
47using namespace llvm::ELF;
48using namespace llvm::object;
49using namespace llvm::support;
50using namespace llvm::support::endian;
51
52using namespace lld;
53using namespace lld::elf;
54
55// Helper functions to identify instructions and conditions needed to trigger
56// the Cortex-A53-843419 erratum.
57
// ADRP
// | 1 | immlo (2) | 1 | 0 0 0 0 | immhi (19) | Rd (5) |
static bool isADRP(uint32_t Instr) {
  const uint32_t Mask = 0x9f000000;
  const uint32_t Encoding = 0x90000000;
  return (Instr & Mask) == Encoding;
}
63
64// Load and store bit patterns from ARMv8-A ARM ARM.
65// Instructions appear in order of appearance starting from table in
66// C4.1.3 Loads and Stores.
67
// All loads and stores have 1 (at bit position 27), (0 at bit position 25).
// | op0 x op1 (2) | 1 op2 0 op3 (2) | x | op4 (5) | xxxx | op5 (2) | x (10) |
static bool isLoadStoreClass(uint32_t Instr) {
  const uint32_t Mask = 0x0a000000;
  const uint32_t Encoding = 0x08000000;
  return (Instr & Mask) == Encoding;
}
73
74// LDN/STN multiple no offset
75// | 0 Q 00 | 1100 | 0 L 00 | 0000 | opcode (4) | size (2) | Rn (5) | Rt (5) |
76// LDN/STN multiple post-indexed
77// | 0 Q 00 | 1100 | 1 L 0 | Rm (5)| opcode (4) | size (2) | Rn (5) | Rt (5) |
78// L == 0 for stores.
79
// Utility routine to decode opcode field of LDN/STN multiple structure
// instructions to find the ST1 instructions.
// opcode == 0010 ST1 4 registers.
// opcode == 0110 ST1 3 registers.
// opcode == 0111 ST1 1 register.
// opcode == 1010 ST1 2 registers.
static bool isST1MultipleOpcode(uint32_t Instr) {
  switch (Instr & 0x0000f000) {
  case 0x00002000: // ST1, 4 registers.
  case 0x00006000: // ST1, 3 registers.
  case 0x00007000: // ST1, 1 register.
  case 0x0000a000: // ST1, 2 registers.
    return true;
  default:
    return false;
  }
}
92
93static bool isST1Multiple(uint32_t Instr) {
94 return (Instr & 0xbfff0000) == 0x0c000000 && isST1MultipleOpcode(Instr);
95}
96
97// Writes to Rn (writeback).
98static bool isST1MultiplePost(uint32_t Instr) {
99 return (Instr & 0xbfe00000) == 0x0c800000 && isST1MultipleOpcode(Instr);
100}
101
102// LDN/STN single no offset
103// | 0 Q 00 | 1101 | 0 L R 0 | 0000 | opc (3) S | size (2) | Rn (5) | Rt (5)|
104// LDN/STN single post-indexed
105// | 0 Q 00 | 1101 | 1 L R | Rm (5) | opc (3) S | size (2) | Rn (5) | Rt (5)|
106// L == 0 for stores
107
// Utility routine to decode opcode field of LDN/STN single structure
// instructions to find the ST1 instructions.
// R == 0 for ST1 and ST3, R == 1 for ST2 and ST4.
// opcode == 000 ST1 8-bit.
// opcode == 010 ST1 16-bit.
// opcode == 100 ST1 32 or 64-bit (Size determines which).
static bool isST1SingleOpcode(uint32_t Instr) {
  switch (Instr & 0x0040e000) {
  case 0x00000000: // ST1, 8-bit.
  case 0x00004000: // ST1, 16-bit.
  case 0x00008000: // ST1, 32 or 64-bit.
    return true;
  default:
    return false;
  }
}
119
120static bool isST1Single(uint32_t Instr) {
121 return (Instr & 0xbfff0000) == 0x0d000000 && isST1SingleOpcode(Instr);
122}
123
124// Writes to Rn (writeback).
125static bool isST1SinglePost(uint32_t Instr) {
126 return (Instr & 0xbfe00000) == 0x0d800000 && isST1SingleOpcode(Instr);
127}
128
129static bool isST1(uint32_t Instr) {
130 return isST1Multiple(Instr) || isST1MultiplePost(Instr) ||
131 isST1Single(Instr) || isST1SinglePost(Instr);
132}
133
// Load/store exclusive
// | size (2) 00 | 1000 | o2 L o1 | Rs (5) | o0 | Rt2 (5) | Rn (5) | Rt (5) |
// L == 0 for Stores.
static bool isLoadStoreExclusive(uint32_t Instr) {
  const uint32_t Mask = 0x3f000000;
  const uint32_t Encoding = 0x08000000;
  return (Instr & Mask) == Encoding;
}
140
// Load-exclusive subset of the load/store exclusive class (L == 1).
static bool isLoadExclusive(uint32_t Instr) {
  const uint32_t Mask = 0x3f400000;
  const uint32_t Encoding = 0x08400000;
  return (Instr & Mask) == Encoding;
}
144
// Load register literal
// | opc (2) 01 | 1 V 00 | imm19 | Rt (5) |
static bool isLoadLiteral(uint32_t Instr) {
  const uint32_t Mask = 0x3b000000;
  const uint32_t Encoding = 0x18000000;
  return (Instr & Mask) == Encoding;
}
150
// Load/store no-allocate pair
// (offset)
// | opc (2) 10 | 1 V 00 | 0 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
// L == 0 for stores.
// Never writes to register.
static bool isSTNP(uint32_t Instr) {
  const uint32_t Mask = 0x3bc00000;
  const uint32_t Encoding = 0x28000000;
  return (Instr & Mask) == Encoding;
}
159
// Load/store register pair
// (post-indexed)
// | opc (2) 10 | 1 V 00 | 1 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
// L == 0 for stores, V == 0 for Scalar, V == 1 for Simd/FP
// Writes to Rn.
static bool isSTPPost(uint32_t Instr) {
  const uint32_t Mask = 0x3bc00000;
  const uint32_t Encoding = 0x28800000;
  return (Instr & Mask) == Encoding;
}
168
// (offset)
// | opc (2) 10 | 1 V 01 | 0 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
static bool isSTPOffset(uint32_t Instr) {
  const uint32_t Mask = 0x3bc00000;
  const uint32_t Encoding = 0x29000000;
  return (Instr & Mask) == Encoding;
}
174
// (pre-index)
// | opc (2) 10 | 1 V 01 | 1 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
// Writes to Rn.
static bool isSTPPre(uint32_t Instr) {
  const uint32_t Mask = 0x3bc00000;
  const uint32_t Encoding = 0x29800000;
  return (Instr & Mask) == Encoding;
}
181
182static bool isSTP(uint32_t Instr) {
183 return isSTPPost(Instr) || isSTPOffset(Instr) || isSTPPre(Instr);
184}
185
// Load/store register (unscaled immediate)
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 00 | Rn (5) | Rt (5) |
// V == 0 for Scalar, V == 1 for Simd/FP.
static bool isLoadStoreUnscaled(uint32_t Instr) {
  const uint32_t Mask = 0x3b000c00;
  const uint32_t Encoding = 0x38000000;
  return (Instr & Mask) == Encoding;
}
192
// Load/store register (immediate post-indexed)
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 01 | Rn (5) | Rt (5) |
static bool isLoadStoreImmediatePost(uint32_t Instr) {
  const uint32_t Mask = 0x3b200c00;
  const uint32_t Encoding = 0x38000400;
  return (Instr & Mask) == Encoding;
}
198
// Load/store register (unprivileged)
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 10 | Rn (5) | Rt (5) |
static bool isLoadStoreUnpriv(uint32_t Instr) {
  const uint32_t Mask = 0x3b200c00;
  const uint32_t Encoding = 0x38000800;
  return (Instr & Mask) == Encoding;
}
204
// Load/store register (immediate pre-indexed)
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 11 | Rn (5) | Rt (5) |
static bool isLoadStoreImmediatePre(uint32_t Instr) {
  const uint32_t Mask = 0x3b200c00;
  const uint32_t Encoding = 0x38000c00;
  return (Instr & Mask) == Encoding;
}
210
// Load/store register (register offset)
// | size (2) 11 | 1 V 00 | opc (2) 1 | Rm (5) | option (3) S | 10 | Rn | Rt |
static bool isLoadStoreRegisterOff(uint32_t Instr) {
  const uint32_t Mask = 0x3b200c00;
  const uint32_t Encoding = 0x38200800;
  return (Instr & Mask) == Encoding;
}
216
// Load/store register (unsigned immediate)
// | size (2) 11 | 1 V 01 | opc (2) | imm12 | Rn (5) | Rt (5) |
static bool isLoadStoreRegisterUnsigned(uint32_t Instr) {
  const uint32_t Mask = 0x3b000000;
  const uint32_t Encoding = 0x39000000;
  return (Instr & Mask) == Encoding;
}
222
// Rt is always in bit position 0 - 4.
static uint32_t getRt(uint32_t Instr) {
  const uint32_t RegMask = 0x1f;
  return Instr & RegMask;
}
225
// Rn is always in bit position 5 - 9.
static uint32_t getRn(uint32_t Instr) {
  const uint32_t RegMask = 0x1f;
  return (Instr >> 5) & RegMask;
}
228
// C4.1.2 Branches, Exception Generating and System instructions
// | op0 (3) 1 | 01 op1 (4) | x (22) |
// op0 == 010 101 op1 == 0xxx Conditional Branch.
// op0 == 110 101 op1 == 1xxx Unconditional Branch Register.
// op0 == x00 101 op1 == xxxx Unconditional Branch immediate.
// op0 == x01 101 op1 == 0xxx Compare and branch immediate.
// op0 == x01 101 op1 == 1xxx Test and branch immediate.
static bool isBranch(uint32_t Instr) {
  if ((Instr & 0xfe000000) == 0xd6000000) // Unconditional branch register.
    return true;
  if ((Instr & 0xfe000000) == 0x54000000) // Conditional branch.
    return true;
  if ((Instr & 0x7c000000) == 0x14000000) // Unconditional branch immediate.
    return true;
  return (Instr & 0x7c000000) == 0x34000000; // Compare and test branch.
}
242
243static bool isV8SingleRegisterNonStructureLoadStore(uint32_t Instr) {
244 return isLoadStoreUnscaled(Instr) || isLoadStoreImmediatePost(Instr) ||
245 isLoadStoreUnpriv(Instr) || isLoadStoreImmediatePre(Instr) ||
246 isLoadStoreRegisterOff(Instr) || isLoadStoreRegisterUnsigned(Instr);
247}
248
249// Note that this function refers to v8.0 only and does not include the
250// additional load and store instructions added for in later revisions of
251// the architecture such as the Atomic memory operations introduced
252// in v8.1.
253static bool isV8NonStructureLoad(uint32_t Instr) {
254 if (isLoadExclusive(Instr))
255 return true;
256 if (isLoadLiteral(Instr))
257 return true;
258 else if (isV8SingleRegisterNonStructureLoadStore(Instr)) {
259 // For Load and Store single register, Loads are derived from a
260 // combination of the Size, V and Opc fields.
261 uint32_t Size = (Instr >> 30) & 0xff;
262 uint32_t V = (Instr >> 26) & 0x1;
263 uint32_t Opc = (Instr >> 22) & 0x3;
264 // For the load and store instructions that we are decoding.
265 // Opc == 0 are all stores.
266 // Opc == 1 with a couple of exceptions are loads. The exceptions are:
267 // Size == 00 (0), V == 1, Opc == 10 (2) which is a store and
268 // Size == 11 (3), V == 0, Opc == 10 (2) which is a prefetch.
269 return Opc != 0 && !(Size == 0 && V == 1 && Opc == 2) &&
270 !(Size == 3 && V == 0 && Opc == 2);
271 }
272 return false;
273}
274
275// The following decode instructions are only complete up to the instructions
276// needed for errata 843419.
277
278// Instruction with writeback updates the index register after the load/store.
279static bool hasWriteback(uint32_t Instr) {
280 return isLoadStoreImmediatePre(Instr) || isLoadStoreImmediatePost(Instr) ||
281 isSTPPre(Instr) || isSTPPost(Instr) || isST1SinglePost(Instr) ||
282 isST1MultiplePost(Instr);
283}
284
285// For the load and store class of instructions, a load can write to the
286// destination register, a load and a store can write to the base register when
287// the instruction has writeback.
288static bool doesLoadStoreWriteToReg(uint32_t Instr, uint32_t Reg) {
289 return (isV8NonStructureLoad(Instr) && getRt(Instr) == Reg) ||
290 (hasWriteback(Instr) && getRn(Instr) == Reg);
291}
292
293// Scanner for Cortex-A53 errata 843419
294// Full details are available in the Cortex A53 MPCore revision 0 Software
295// Developers Errata Notice (ARM-EPM-048406).
296//
297// The instruction sequence that triggers the erratum is common in compiled
298// AArch64 code, however it is sensitive to the offset of the sequence within
299// a 4k page. This means that by scanning and fixing the patch after we have
300// assigned addresses we only need to disassemble and fix instances of the
301// sequence in the range of affected offsets.
302//
303// In summary the erratum conditions are a series of 4 instructions:
304// 1.) An ADRP instruction that writes to register Rn with low 12 bits of
305// address of instruction either 0xff8 or 0xffc.
306// 2.) A load or store instruction that can be:
307// - A single register load or store, of either integer or vector registers.
308// - An STP or STNP, of either integer or vector registers.
309// - An Advanced SIMD ST1 store instruction.
310// - Must not write to Rn, but may optionally read from it.
311// 3.) An optional instruction that is not a branch and does not write to Rn.
312// 4.) A load or store from the Load/store register (unsigned immediate) class
313// that uses Rn as the base address register.
314//
315// Note that we do not attempt to scan for Sequence 2 as described in the
316// Software Developers Errata Notice as this has been assessed to be extremely
317// unlikely to occur in compiled code. This matches gold and ld.bfd behavior.
318
319// Return true if the Instruction sequence Adrp, Instr2, and Instr4 match
320// the erratum sequence. The Adrp, Instr2 and Instr4 correspond to 1.), 2.),
321// and 4.) in the Scanner for Cortex-A53 errata comment above.
322static bool is843419ErratumSequence(uint32_t Instr1, uint32_t Instr2,
323 uint32_t Instr4) {
324 if (!isADRP(Instr1))
325 return false;
326
327 uint32_t Rn = getRt(Instr1);
328 return isLoadStoreClass(Instr2) &&
329 (isLoadStoreExclusive(Instr2) || isLoadLiteral(Instr2) ||
330 isV8SingleRegisterNonStructureLoadStore(Instr2) || isSTP(Instr2) ||
331 isSTNP(Instr2) || isST1(Instr2)) &&
332 !doesLoadStoreWriteToReg(Instr2, Rn) &&
333 isLoadStoreRegisterUnsigned(Instr4) && getRn(Instr4) == Rn;
334}
335
// Scan the instruction sequence starting at Offset Off from the base of
// InputSection IS. We update Off in this function rather than in the caller as
// we can skip ahead much further into the section when we know how many
// instructions we've scanned.
// Return the offset of the load or store instruction in IS that we want to
// patch or 0 if no patch required.
static uint64_t scanCortexA53Errata843419(InputSection *IS, uint64_t &Off,
                                          uint64_t Limit) {
  uint64_t ISAddr = IS->getVA(0);

  // Advance Off so that (ISAddr + Off) modulo 0x1000 is at least 0xff8. The
  // erratum can only trigger when the ADRP is at page offset 0xff8 or 0xffc.
  uint64_t InitialPageOff = (ISAddr + Off) & 0xfff;
  if (InitialPageOff < 0xff8)
    Off += 0xff8 - InitialPageOff;

  // The optional third instruction is only considered when at least four
  // instructions remain before Limit. NOTE(review): if Off > Limit this
  // subtraction wraps, but the early return below handles that case.
  bool OptionalAllowed = Limit - Off > 12;
  if (Off >= Limit || Limit - Off < 12) {
    // Need at least 3 4-byte sized instructions to trigger erratum.
    Off = Limit;
    return 0;
  }

  uint64_t PatchOff = 0;
  const uint8_t *Buf = IS->data().begin();
  // Instructions are always little-endian on AArch64 ELF.
  const ulittle32_t *InstBuf = reinterpret_cast<const ulittle32_t *>(Buf + Off);
  uint32_t Instr1 = *InstBuf++;
  uint32_t Instr2 = *InstBuf++;
  uint32_t Instr3 = *InstBuf++;
  // First try the 3-instruction form; otherwise treat Instr3 as the optional
  // middle instruction (which must not be a branch) and test instruction 4.
  if (is843419ErratumSequence(Instr1, Instr2, Instr3)) {
    PatchOff = Off + 8;
  } else if (OptionalAllowed && !isBranch(Instr3)) {
    uint32_t Instr4 = *InstBuf++;
    if (is843419ErratumSequence(Instr1, Instr2, Instr4))
      PatchOff = Off + 12;
  }
  // Advance to the next candidate page offset: from 0xff8 step to 0xffc of
  // the same page; from 0xffc jump to 0xff8 of the next page.
  if (((ISAddr + Off) & 0xfff) == 0xff8)
    Off += 4;
  else
    Off += 0xffc;
  return PatchOff;
}
377
// A synthetic section holding one patch: a copy of the load/store instruction
// being replaced, followed by a branch back to the instruction after it in
// the patched (Patchee) section.
class lld::elf::Patch843419Section : public SyntheticSection {
public:
  Patch843419Section(InputSection *P, uint64_t Off);

  void writeTo(uint8_t *Buf) override;

  // One copied instruction plus one branch back: 2 * 4 bytes.
  size_t getSize() const override { return 8; }

  // Virtual address of the load/store instruction being patched.
  uint64_t getLDSTAddr() const;

  // The Section we are patching.
  const InputSection *Patchee;
  // The offset of the instruction in the Patchee section we are patching.
  uint64_t PatcheeOffset;
  // A label for the start of the Patch that we can use as a relocation target.
  Symbol *PatchSym;
};
395
// Construct a patch for the instruction at offset Off in section P. The patch
// lives in an executable ".text.patch" section sharing P's parent output
// section, and defines two local symbols: the branch target for the patch and
// a "$x" mapping symbol marking its contents as code.
lld::elf::Patch843419Section::Patch843419Section(InputSection *P, uint64_t Off)
    : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 4,
                       ".text.patch"),
      Patchee(P), PatcheeOffset(Off) {
  this->Parent = P->getParent();
  // Name the patch after the address of the instruction it replaces so it is
  // recognizable in the output symbol table.
  PatchSym = addSyntheticLocal(
      Saver.save("__CortexA53843419_" + utohexstr(getLDSTAddr())), STT_FUNC, 0,
      getSize(), *this);
  addSyntheticLocal(Saver.save("$x"), STT_NOTYPE, 0, 0, *this);
}
406
// Virtual address of the load/store instruction in the Patchee section that
// this patch replaces.
uint64_t lld::elf::Patch843419Section::getLDSTAddr() const {
  return Patchee->getVA(PatcheeOffset);
}
410
// Emit the patch contents: the original (replaced) instruction followed by a
// branch back to the instruction after it in the Patchee section.
void lld::elf::Patch843419Section::writeTo(uint8_t *Buf) {
  // Copy the instruction that we will be replacing with a branch in the
  // Patchee Section.
  write32le(Buf, read32le(Patchee->data().begin() + PatcheeOffset));

  // Apply any relocation transferred from the original PatcheeSection.
  // For a SyntheticSection Buf already has OutSecOff added, but relocateAlloc
  // also adds OutSecOff so we need to subtract to avoid double counting.
  this->relocateAlloc(Buf - OutSecOff, Buf - OutSecOff + getSize());

  // Return address is the next instruction after the one we have just copied.
  uint64_t S = getLDSTAddr() + 4;
  uint64_t P = PatchSym->getVA() + 4;
  Target->relocateOne(Buf + 4, R_AARCH64_JUMP26, S - P);
}
426
// Build SectionMap: for every executable InputSection, the sorted, de-duplicated
// list of its mapping symbols. Must run before the first scan pass.
void AArch64Err843419Patcher::init() {
  // The AArch64 ABI permits data in executable sections. We must avoid scanning
  // this data as if it were instructions to avoid false matches. We use the
  // mapping symbols in the InputObjects to identify this data, caching the
  // results in SectionMap so we don't have to recalculate it each pass.

  // The ABI Section 4.5.4 Mapping symbols; defines local symbols that describe
  // half open intervals [Symbol Value, Next Symbol Value) of code and data
  // within sections. If there is no next symbol then the half open interval is
  // [Symbol Value, End of section). The type, code or data, is determined by
  // the mapping symbol name, $x for code, $d for data.
  auto IsCodeMapSymbol = [](const Symbol *B) {
    return B->getName() == "$x" || B->getName().startswith("$x.");
  };
  auto IsDataMapSymbol = [](const Symbol *B) {
    return B->getName() == "$d" || B->getName().startswith("$d.");
  };

  // Collect mapping symbols for every executable InputSection.
  for (InputFile *File : ObjectFiles) {
    auto *F = cast<ObjFile<ELF64LE>>(File);
    for (Symbol *B : F->getLocalSymbols()) {
      auto *Def = dyn_cast<Defined>(B);
      if (!Def)
        continue;
      if (!IsCodeMapSymbol(Def) && !IsDataMapSymbol(Def))
        continue;
      if (auto *Sec = dyn_cast_or_null<InputSection>(Def->Section))
        if (Sec->Flags & SHF_EXECINSTR)
          SectionMap[Sec].push_back(Def);
    }
  }
  // For each InputSection make sure the mapping symbols are in sorted in
  // ascending order and free from consecutive runs of mapping symbols with
  // the same type. For example we must remove the redundant $d.1 from $x.0
  // $d.0 $d.1 $x.1.
  for (auto &KV : SectionMap) {
    std::vector<const Defined *> &MapSyms = KV.second;
    if (MapSyms.size() <= 1)
      continue;
    std::stable_sort(
        MapSyms.begin(), MapSyms.end(),
        [](const Defined *A, const Defined *B) { return A->Value < B->Value; });
    // std::unique keeps the FIRST of each run, so the surviving symbol marks
    // the start of each maximal code or data interval.
    MapSyms.erase(
        std::unique(MapSyms.begin(), MapSyms.end(),
                    [=](const Defined *A, const Defined *B) {
                      return (IsCodeMapSymbol(A) && IsCodeMapSymbol(B)) ||
                             (IsDataMapSymbol(A) && IsDataMapSymbol(B));
                    }),
        MapSyms.end());
  }
  Initialized = true;
}
480
481// Insert the PatchSections we have created back into the
482// InputSectionDescription. As inserting patches alters the addresses of
483// InputSections that follow them, we try and place the patches after all the
484// executable sections, although we may need to insert them earlier if the
485// InputSectionDescription is larger than the maximum branch range.
486void AArch64Err843419Patcher::insertPatches(
487 InputSectionDescription &ISD, std::vector<Patch843419Section *> &Patches) {
488 uint64_t ISLimit;
10
'ISLimit' declared without an initial value
489 uint64_t PrevISLimit = ISD.Sections.front()->OutSecOff;
490 uint64_t PatchUpperBound = PrevISLimit + Target->getThunkSectionSpacing();
491
492 // Set the OutSecOff of patches to the place where we want to insert them.
493 // We use a similar strategy to Thunk placement. Place patches roughly
494 // every multiple of maximum branch range.
495 auto PatchIt = Patches.begin();
496 auto PatchEnd = Patches.end();
497 for (const InputSection *IS : ISD.Sections) {
498 ISLimit = IS->OutSecOff + IS->getSize();
499 if (ISLimit > PatchUpperBound) {
500 while (PatchIt != PatchEnd) {
501 if ((*PatchIt)->getLDSTAddr() >= PrevISLimit)
502 break;
503 (*PatchIt)->OutSecOff = PrevISLimit;
504 ++PatchIt;
505 }
506 PatchUpperBound = PrevISLimit + Target->getThunkSectionSpacing();
507 }
508 PrevISLimit = ISLimit;
509 }
510 for (; PatchIt != PatchEnd; ++PatchIt) {
11
Loop condition is true. Entering loop body
511 (*PatchIt)->OutSecOff = ISLimit;
12
Assigned value is garbage or undefined
512 }
513
514 // merge all patch sections. We use the OutSecOff assigned above to
515 // determine the insertion point. This is ok as we only merge into an
516 // InputSectionDescription once per pass, and at the end of the pass
517 // assignAddresses() will recalculate all the OutSecOff values.
518 std::vector<InputSection *> Tmp;
519 Tmp.reserve(ISD.Sections.size() + Patches.size());
520 auto MergeCmp = [](const InputSection *A, const InputSection *B) {
521 if (A->OutSecOff < B->OutSecOff)
522 return true;
523 if (A->OutSecOff == B->OutSecOff && isa<Patch843419Section>(A) &&
524 !isa<Patch843419Section>(B))
525 return true;
526 return false;
527 };
528 std::merge(ISD.Sections.begin(), ISD.Sections.end(), Patches.begin(),
529 Patches.end(), std::back_inserter(Tmp), MergeCmp);
530 ISD.Sections = std::move(Tmp);
531}
532
// Given an erratum sequence that starts at address AdrpAddr, with an
// instruction that we need to patch at PatcheeOffset from the start of
// InputSection IS, create a Patch843419 Section and add it to the
// Patches that we need to insert.
static void implementPatch(uint64_t AdrpAddr, uint64_t PatcheeOffset,
                           InputSection *IS,
                           std::vector<Patch843419Section *> &Patches) {
  // There may be a relocation at the same offset that we are patching. There
  // are three cases that we need to consider.
  // Case 1: R_AARCH64_JUMP26 branch relocation. We have already patched this
  // instance of the erratum on a previous patch and altered the relocation. We
  // have nothing more to do.
  // Case 2: A load/store register (unsigned immediate) class relocation. There
  // are two of these R_AARCH_LD64_ABS_LO12_NC and R_AARCH_LD64_GOT_LO12_NC and
  // they are both absolute. We need to add the same relocation to the patch,
  // and replace the relocation with a R_AARCH_JUMP26 branch relocation.
  // Case 3: No relocation. We must create a new R_AARCH64_JUMP26 branch
  // relocation at the offset.
  auto RelIt = std::find_if(
      IS->Relocations.begin(), IS->Relocations.end(),
      [=](const Relocation &R) { return R.Offset == PatcheeOffset; });
  // Case 1: already patched on a previous pass; nothing to do.
  if (RelIt != IS->Relocations.end() && RelIt->Type == R_AARCH64_JUMP26)
    return;

  log("detected cortex-a53-843419 erratum sequence starting at " +
      utohexstr(AdrpAddr) + " in unpatched output.");

  auto *PS = make<Patch843419Section>(IS, PatcheeOffset);
  Patches.push_back(PS);

  auto MakeRelToPatch = [](uint64_t Offset, Symbol *PatchSym) {
    return Relocation{R_PC, R_AARCH64_JUMP26, Offset, 0, PatchSym};
  };

  if (RelIt != IS->Relocations.end()) {
    // Case 2: move the original relocation into the patch, then redirect the
    // patchee's relocation to branch to the patch.
    PS->Relocations.push_back(
        {RelIt->Expr, RelIt->Type, 0, RelIt->Addend, RelIt->Sym});
    *RelIt = MakeRelToPatch(PatcheeOffset, PS->PatchSym);
  } else
    // Case 3: no existing relocation; add a fresh branch to the patch.
    IS->Relocations.push_back(MakeRelToPatch(PatcheeOffset, PS->PatchSym));
}
574
// Scan all the instructions in InputSectionDescription, for each instance of
// the erratum sequence create a Patch843419Section. We return the list of
// Patch843419Sections that need to be applied to ISD.
std::vector<Patch843419Section *>
AArch64Err843419Patcher::patchInputSectionDescription(
    InputSectionDescription &ISD) {
  std::vector<Patch843419Section *> Patches;
  for (InputSection *IS : ISD.Sections) {
    // LLD doesn't use the erratum sequence in SyntheticSections.
    if (isa<SyntheticSection>(IS))
      continue;
    // Use SectionMap to make sure we only scan code and not inline data.
    // We have already sorted MapSyms in ascending order and removed consecutive
    // mapping symbols of the same type. Our range of executable instructions to
    // scan is therefore [CodeSym->Value, DataSym->Value) or [CodeSym->Value,
    // section size).
    std::vector<const Defined *> &MapSyms = SectionMap[IS];

    // Find the first code ($x) mapping symbol; sections with no code symbol
    // are skipped entirely.
    auto CodeSym = llvm::find_if(MapSyms, [&](const Defined *MS) {
      return MS->getName().startswith("$x");
    });

    while (CodeSym != MapSyms.end()) {
      // The next mapping symbol, if any, ends this code interval.
      auto DataSym = std::next(CodeSym);
      uint64_t Off = (*CodeSym)->Value;
      uint64_t Limit =
          (DataSym == MapSyms.end()) ? IS->data().size() : (*DataSym)->Value;

      // scanCortexA53Errata843419 advances Off itself, so this loop
      // terminates when Off reaches Limit.
      while (Off < Limit) {
        uint64_t StartAddr = IS->getVA(Off);
        if (uint64_t PatcheeOffset = scanCortexA53Errata843419(IS, Off, Limit))
          implementPatch(StartAddr, PatcheeOffset, IS, Patches);
      }
      if (DataSym == MapSyms.end())
        break;
      // MapSyms alternates code/data, so the symbol after a data symbol (if
      // present) starts the next code interval.
      CodeSym = std::next(DataSym);
    }
  }
  return Patches;
}
615
616// For each InputSectionDescription make one pass over the executable sections
617// looking for the erratum sequence; creating a synthetic Patch843419Section
618// for each instance found. We insert these synthetic patch sections after the
619// executable code in each InputSectionDescription.
620//
621// PreConditions:
622// The Output and Input Sections have had their final addresses assigned.
623//
624// PostConditions:
625// Returns true if at least one patch was added. The addresses of the
626// Ouptut and Input Sections may have been changed.
627// Returns false if no patches were required and no changes were made.
628bool AArch64Err843419Patcher::createFixes() {
629 if (Initialized == false)
1
Assuming the condition is false
2
Taking false branch
630 init();
631
632 bool AddressesChanged = false;
633 for (OutputSection *OS : OutputSections) {
634 if (!(OS->Flags & SHF_ALLOC) || !(OS->Flags & SHF_EXECINSTR))
3
Assuming the condition is false
4
Assuming the condition is false
5
Taking false branch
635 continue;
636 for (BaseCommand *BC : OS->SectionCommands)
637 if (auto *ISD = dyn_cast<InputSectionDescription>(BC)) {
6
Taking true branch
638 std::vector<Patch843419Section *> Patches =
639 patchInputSectionDescription(*ISD);
640 if (!Patches.empty()) {
7
Assuming the condition is true
8
Taking true branch
641 insertPatches(*ISD, Patches);
9
Calling 'AArch64Err843419Patcher::insertPatches'
642 AddressesChanged = true;
643 }
644 }
645 }
646 return AddressesChanged;
647}