Bug Summary

File: llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
Warning: line 88, column 17
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int'
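
For context: in C++, shifting a value by an amount greater than or equal to the
width of its type is undefined behavior ([expr.shift]p1), so a 32-bit
'unsigned int' shifted right by 32 has no defined result. A minimal
illustration (not taken from the analyzed source):

    unsigned u = 0x80000000u;   // any 32-bit value
    unsigned r = u >> 32;       // undefined: shift amount equals the bit width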

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ARMAsmBackend.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/MCTargetDesc -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/MCTargetDesc -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/MCTargetDesc -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/MCTargetDesc -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp

1//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/ARMAsmBackend.h"
10#include "MCTargetDesc/ARMAddressingModes.h"
11#include "MCTargetDesc/ARMAsmBackendDarwin.h"
12#include "MCTargetDesc/ARMAsmBackendELF.h"
13#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14#include "MCTargetDesc/ARMFixupKinds.h"
15#include "MCTargetDesc/ARMMCTargetDesc.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/BinaryFormat/ELF.h"
18#include "llvm/BinaryFormat/MachO.h"
19#include "llvm/MC/MCAsmBackend.h"
20#include "llvm/MC/MCAssembler.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCDirectives.h"
23#include "llvm/MC/MCELFObjectWriter.h"
24#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCFixupKindInfo.h"
26#include "llvm/MC/MCObjectWriter.h"
27#include "llvm/MC/MCRegisterInfo.h"
28#include "llvm/MC/MCSectionELF.h"
29#include "llvm/MC/MCSectionMachO.h"
30#include "llvm/MC/MCSubtargetInfo.h"
31#include "llvm/MC/MCValue.h"
32#include "llvm/MC/MCAsmLayout.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/EndianStream.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/Format.h"
37#include "llvm/Support/TargetParser.h"
38#include "llvm/Support/raw_ostream.h"
39using namespace llvm;
40
41namespace {
42class ARMELFObjectWriter : public MCELFObjectTargetWriter {
43public:
44 ARMELFObjectWriter(uint8_t OSABI)
45 : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
46 /*HasRelocationAddend*/ false) {}
47};
48} // end anonymous namespace
49
50Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
51 if (!STI.getTargetTriple().isOSBinFormatELF())
52 return None;
53
54 unsigned Type = llvm::StringSwitch<unsigned>(Name)
55#define ELF_RELOC(X, Y) .Case(#X, Y)
56#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
57#undef ELF_RELOC
58 .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
59 .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
60 .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
61 .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
62 .Default(-1u);
63 if (Type == -1u)
64 return None;
65 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
66}
67
68const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
69 unsigned IsPCRelConstant =
70 MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
71 const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
72 // This table *must* be in the order that the fixup_* kinds are defined in
73 // ARMFixupKinds.h.
74 //
75 // Name Offset (bits) Size (bits) Flags
76 {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
77 {"fixup_t2_ldst_pcrel_12", 0, 32,
78 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
79 {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
80 {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
81 {"fixup_t2_pcrel_10", 0, 32,
82 MCFixupKindInfo::FKF_IsPCRel |
83 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
84 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
85 {"fixup_t2_pcrel_9", 0, 32,
86 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
87 {"fixup_arm_ldst_abs_12", 0, 32, 0},
88 {"fixup_thumb_adr_pcrel_10", 0, 8,
89 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
90 {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
91 {"fixup_t2_adr_pcrel_12", 0, 32,
92 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
93 {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
94 {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
95 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
96 {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
97 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
98 {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
99 {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
100 {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
101 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
102 {"fixup_arm_thumb_blx", 0, 32,
103 MCFixupKindInfo::FKF_IsPCRel |
104 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
105 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
106 {"fixup_arm_thumb_cp", 0, 8,
107 MCFixupKindInfo::FKF_IsPCRel |
108 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
109 {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
110 // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12, 16
111 // - 19.
112 {"fixup_arm_movt_hi16", 0, 20, 0},
113 {"fixup_arm_movw_lo16", 0, 20, 0},
114 {"fixup_t2_movt_hi16", 0, 20, 0},
115 {"fixup_t2_movw_lo16", 0, 20, 0},
116 {"fixup_arm_mod_imm", 0, 12, 0},
117 {"fixup_t2_so_imm", 0, 26, 0},
118 {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
119 {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
120 {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
121 {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
122 {"fixup_bfcsel_else_target", 0, 32, 0},
123 {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
124 {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
125 const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
126 // This table *must* be in the order that the fixup_* kinds are defined in
127 // ARMFixupKinds.h.
128 //
129 // Name Offset (bits) Size (bits) Flags
130 {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
131 {"fixup_t2_ldst_pcrel_12", 0, 32,
132 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
133 {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
134 {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
135 {"fixup_t2_pcrel_10", 0, 32,
136 MCFixupKindInfo::FKF_IsPCRel |
137 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
138 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
139 {"fixup_t2_pcrel_9", 0, 32,
140 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
141 {"fixup_arm_ldst_abs_12", 0, 32, 0},
142 {"fixup_thumb_adr_pcrel_10", 8, 8,
143 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
144 {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
145 {"fixup_t2_adr_pcrel_12", 0, 32,
146 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
147 {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
148 {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
149 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
150 {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
151 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
152 {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
153 {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
154 {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
155 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
156 {"fixup_arm_thumb_blx", 0, 32,
157 MCFixupKindInfo::FKF_IsPCRel |
158 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
159 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
160 {"fixup_arm_thumb_cp", 8, 8,
161 MCFixupKindInfo::FKF_IsPCRel |
162 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
163 {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
164 // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12, 16
165 // - 19.
166 {"fixup_arm_movt_hi16", 12, 20, 0},
167 {"fixup_arm_movw_lo16", 12, 20, 0},
168 {"fixup_t2_movt_hi16", 12, 20, 0},
169 {"fixup_t2_movw_lo16", 12, 20, 0},
170 {"fixup_arm_mod_imm", 20, 12, 0},
171 {"fixup_t2_so_imm", 26, 6, 0},
172 {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
173 {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
174 {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
175 {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
176 {"fixup_bfcsel_else_target", 0, 32, 0},
177 {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
178 {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
179
180 // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
181 // any extra processing.
182 if (Kind >= FirstLiteralRelocationKind)
183 return MCAsmBackend::getFixupKindInfo(FK_NONE);
184
185 if (Kind < FirstTargetFixupKind)
186 return MCAsmBackend::getFixupKindInfo(Kind);
187
188 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&(static_cast<void> (0))
189 "Invalid kind!")(static_cast<void> (0));
190 return (Endian == support::little ? InfosLE
191 : InfosBE)[Kind - FirstTargetFixupKind];
192}
193
194void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
195 switch (Flag) {
196 default:
197 break;
198 case MCAF_Code16:
199 setIsThumb(true);
200 break;
201 case MCAF_Code32:
202 setIsThumb(false);
203 break;
204 }
205}
206
207unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
208 const MCSubtargetInfo &STI) const {
209 bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
210 bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
211
212 switch (Op) {
213 default:
214 return Op;
215 case ARM::tBcc:
216 return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
217 case ARM::tLDRpci:
218 return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
219 case ARM::tADR:
220 return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
221 case ARM::tB:
222 return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
223 case ARM::tCBZ:
224 return ARM::tHINT;
225 case ARM::tCBNZ:
226 return ARM::tHINT;
227 }
228}
229
230bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
231 const MCSubtargetInfo &STI) const {
232 if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
233 return true;
234 return false;
235}
236
237static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
238 int64_t Offset = int64_t(Value) - 4;
239 if (Offset < Min || Offset > Max)
240 return "out of range pc-relative fixup value";
241 return nullptr;
242}
243
244const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
245 uint64_t Value) const {
246 switch (Fixup.getTargetKind()) {
247 case ARM::fixup_arm_thumb_br: {
248 // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
249 // low bit being an implied zero. There's an implied +4 offset for the
250 // branch, so we adjust the other way here to determine what's
251 // encodable.
252 //
253 // Relax if the value is too big for a (signed) i8.
254 int64_t Offset = int64_t(Value) - 4;
255 if (Offset > 2046 || Offset < -2048)
256 return "out of range pc-relative fixup value";
257 break;
258 }
259 case ARM::fixup_arm_thumb_bcc: {
260 // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
261 // low bit being an implied zero. There's an implied +4 offset for the
262 // branch, so we adjust the other way here to determine what's
263 // encodable.
264 //
265 // Relax if the value is too big for a (signed) i8.
266 int64_t Offset = int64_t(Value) - 4;
267 if (Offset > 254 || Offset < -256)
268 return "out of range pc-relative fixup value";
269 break;
270 }
271 case ARM::fixup_thumb_adr_pcrel_10:
272 case ARM::fixup_arm_thumb_cp: {
273 // If the immediate is negative, greater than 1020, or not a multiple
274 // of four, the wide version of the instruction must be used.
275 int64_t Offset = int64_t(Value) - 4;
276 if (Offset & 3)
277 return "misaligned pc-relative fixup value";
278 else if (Offset > 1020 || Offset < 0)
279 return "out of range pc-relative fixup value";
280 break;
281 }
282 case ARM::fixup_arm_thumb_cb: {
283 // If we have a Thumb CBZ or CBNZ instruction and its target is the next
284 // instruction, it is actually out of range for the instruction.
285 // It will be changed to a NOP.
286 int64_t Offset = (Value & ~1);
287 if (Offset == 2)
288 return "will be converted to nop";
289 break;
290 }
291 case ARM::fixup_bf_branch:
292 return checkPCRelOffset(Value, 0, 30);
293 case ARM::fixup_bf_target:
294 return checkPCRelOffset(Value, -0x10000, +0xfffe);
295 case ARM::fixup_bfl_target:
296 return checkPCRelOffset(Value, -0x40000, +0x3fffe);
297 case ARM::fixup_bfc_target:
298 return checkPCRelOffset(Value, -0x1000, +0xffe);
299 case ARM::fixup_wls:
300 return checkPCRelOffset(Value, 0, +0xffe);
301 case ARM::fixup_le:
302 // The offset field in the LE and LETP instructions is an 11-bit
303 // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
304 // interpreted as a negative offset from the value read from pc,
305 // i.e. from instruction_address+4.
306 //
307 // So an LE instruction can in principle address the instruction
308 // immediately after itself, or (not very usefully) the address
309 // half way through the 4-byte LE.
310 return checkPCRelOffset(Value, -0xffe, 0);
311 case ARM::fixup_bfcsel_else_target: {
312 if (Value != 2 && Value != 4)
313 return "out of range label-relative fixup value";
314 break;
315 }
316
317 default:
318 llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!")__builtin_unreachable();
319 }
320 return nullptr;
321}
322
323bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
324 const MCRelaxableFragment *DF,
325 const MCAsmLayout &Layout) const {
326 return reasonForFixupRelaxation(Fixup, Value);
327}
328
329void ARMAsmBackend::relaxInstruction(MCInst &Inst,
330 const MCSubtargetInfo &STI) const {
331 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
332
333 // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
334 if (RelaxedOp == Inst.getOpcode()) {
335 SmallString<256> Tmp;
336 raw_svector_ostream OS(Tmp);
337 Inst.dump_pretty(OS);
338 OS << "\n";
339 report_fatal_error("unexpected instruction to relax: " + OS.str());
340 }
341
342 // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
343 // have to change the operands too.
344 if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
345 RelaxedOp == ARM::tHINT) {
346 MCInst Res;
347 Res.setOpcode(RelaxedOp);
348 Res.addOperand(MCOperand::createImm(0));
349 Res.addOperand(MCOperand::createImm(14));
350 Res.addOperand(MCOperand::createReg(0));
351 Inst = std::move(Res);
352 return;
353 }
354
355 // The rest of instructions we're relaxing have the same operands.
356 // We just need to update to the proper opcode.
357 Inst.setOpcode(RelaxedOp);
358}
359
360bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
361 const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
362 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
363 const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0
364 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
365 if (isThumb()) {
366 const uint16_t nopEncoding =
367 hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
368 uint64_t NumNops = Count / 2;
369 for (uint64_t i = 0; i != NumNops; ++i)
370 support::endian::write(OS, nopEncoding, Endian);
371 if (Count & 1)
372 OS << '\0';
373 return true;
374 }
375 // ARM mode
376 const uint32_t nopEncoding =
377 hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
378 uint64_t NumNops = Count / 4;
379 for (uint64_t i = 0; i != NumNops; ++i)
380 support::endian::write(OS, nopEncoding, Endian);
381 // FIXME: should this function return false when unable to write exactly
382 // 'Count' bytes with NOP encodings?
383 switch (Count % 4) {
384 default:
385 break; // No leftover bytes to write
386 case 1:
387 OS << '\0';
388 break;
389 case 2:
390 OS.write("\0\0", 2);
391 break;
392 case 3:
393 OS.write("\0\0\xa0", 3);
394 break;
395 }
396
397 return true;
398}
399
400static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
401 if (IsLittleEndian) {
402 // Note that the halfwords are stored high first and low second in thumb;
403 // so we need to swap the fixup value here to map properly.
404 uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
405 Swapped |= (Value & 0x0000FFFF) << 16;
406 return Swapped;
407 } else
408 return Value;
409}
410
411static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
412 bool IsLittleEndian) {
413 uint32_t Value;
414
415 if (IsLittleEndian) {
416 Value = (SecondHalf & 0xFFFF) << 16;
417 Value |= (FirstHalf & 0xFFFF);
418 } else {
419 Value = (SecondHalf & 0xFFFF);
420 Value |= (FirstHalf & 0xFFFF) << 16;
421 }
422
423 return Value;
424}
425
426unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
427 const MCFixup &Fixup,
428 const MCValue &Target, uint64_t Value,
429 bool IsResolved, MCContext &Ctx,
430 const MCSubtargetInfo* STI) const {
431 unsigned Kind = Fixup.getKind();
432
433 // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
434 // and .word relocations they put the Thumb bit into the addend if possible.
435 // Other relocation types don't want this bit though (branches couldn't encode
436 // it if it *was* present, and no other relocations exist) and it can
437 // interfere with checking valid expressions.
438 if (const MCSymbolRefExpr *A = Target.getSymA()) {
[1] Assuming 'A' is null
[2] Taking false branch
439 if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
440 A->getSymbol().isExternal() &&
441 (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
442 Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
443 Kind == ARM::fixup_t2_movt_hi16))
444 Value |= 1;
445 }
446
447 switch (Kind) {
[3] Control jumps to 'case fixup_arm_mod_imm:' at line 798
448 default:
449 Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
450 return 0;
451 case FK_Data_1:
452 case FK_Data_2:
453 case FK_Data_4:
454 return Value;
455 case FK_SecRel_2:
456 return Value;
457 case FK_SecRel_4:
458 return Value;
459 case ARM::fixup_arm_movt_hi16:
460 assert(STI != nullptr)(static_cast<void> (0));
461 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
462 Value >>= 16;
463 LLVM_FALLTHROUGH[[gnu::fallthrough]];
464 case ARM::fixup_arm_movw_lo16: {
465 unsigned Hi4 = (Value & 0xF000) >> 12;
466 unsigned Lo12 = Value & 0x0FFF;
467 // inst{19-16} = Hi4;
468 // inst{11-0} = Lo12;
469 Value = (Hi4 << 16) | (Lo12);
470 return Value;
471 }
472 case ARM::fixup_t2_movt_hi16:
473 assert(STI != nullptr)(static_cast<void> (0));
474 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
475 Value >>= 16;
476 LLVM_FALLTHROUGH[[gnu::fallthrough]];
477 case ARM::fixup_t2_movw_lo16: {
478 unsigned Hi4 = (Value & 0xF000) >> 12;
479 unsigned i = (Value & 0x800) >> 11;
480 unsigned Mid3 = (Value & 0x700) >> 8;
481 unsigned Lo8 = Value & 0x0FF;
482 // inst{19-16} = Hi4;
483 // inst{26} = i;
484 // inst{14-12} = Mid3;
485 // inst{7-0} = Lo8;
486 Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
487 return swapHalfWords(Value, Endian == support::little);
488 }
489 case ARM::fixup_arm_ldst_pcrel_12:
490 // ARM PC-relative values are offset by 8.
491 Value -= 4;
492 LLVM_FALLTHROUGH[[gnu::fallthrough]];
493 case ARM::fixup_t2_ldst_pcrel_12:
494 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
495 Value -= 4;
496 LLVM_FALLTHROUGH[[gnu::fallthrough]];
497 case ARM::fixup_arm_ldst_abs_12: {
498 bool isAdd = true;
499 if ((int64_t)Value < 0) {
500 Value = -Value;
501 isAdd = false;
502 }
503 if (Value >= 4096) {
504 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
505 return 0;
506 }
507 Value |= isAdd << 23;
508
509 // Same addressing mode as fixup_arm_pcrel_10,
510 // but with 16-bit halfwords swapped.
511 if (Kind == ARM::fixup_t2_ldst_pcrel_12)
512 return swapHalfWords(Value, Endian == support::little);
513
514 return Value;
515 }
516 case ARM::fixup_arm_adr_pcrel_12: {
517 // ARM PC-relative values are offset by 8.
518 Value -= 8;
519 unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
520 if ((int64_t)Value < 0) {
521 Value = -Value;
522 opc = 2; // 0b0010
523 }
524 if (ARM_AM::getSOImmVal(Value) == -1) {
525 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
526 return 0;
527 }
528 // Encode the immediate and shift the opcode into place.
529 return ARM_AM::getSOImmVal(Value) | (opc << 21);
530 }
531
532 case ARM::fixup_t2_adr_pcrel_12: {
533 Value -= 4;
534 unsigned opc = 0;
535 if ((int64_t)Value < 0) {
536 Value = -Value;
537 opc = 5;
538 }
539
540 uint32_t out = (opc << 21);
541 out |= (Value & 0x800) << 15;
542 out |= (Value & 0x700) << 4;
543 out |= (Value & 0x0FF);
544
545 return swapHalfWords(out, Endian == support::little);
546 }
547
548 case ARM::fixup_arm_condbranch:
549 case ARM::fixup_arm_uncondbranch:
550 case ARM::fixup_arm_uncondbl:
551 case ARM::fixup_arm_condbl:
552 case ARM::fixup_arm_blx:
553 // These values don't encode the low two bits since they're always zero.
554 // Offset by 8 just as above.
555 if (const MCSymbolRefExpr *SRE =
556 dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
557 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
558 return 0;
559 return 0xffffff & ((Value - 8) >> 2);
560 case ARM::fixup_t2_uncondbranch: {
561 Value = Value - 4;
562 if (!isInt<25>(Value)) {
563 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
564 return 0;
565 }
566
567 Value >>= 1; // Low bit is not encoded.
568
569 uint32_t out = 0;
570 bool I = Value & 0x800000;
571 bool J1 = Value & 0x400000;
572 bool J2 = Value & 0x200000;
573 J1 ^= I;
574 J2 ^= I;
575
576 out |= I << 26; // S bit
577 out |= !J1 << 13; // J1 bit
578 out |= !J2 << 11; // J2 bit
579 out |= (Value & 0x1FF800) << 5; // imm6 field
580 out |= (Value & 0x0007FF); // imm11 field
581
582 return swapHalfWords(out, Endian == support::little);
583 }
584 case ARM::fixup_t2_condbranch: {
585 Value = Value - 4;
586 if (!isInt<21>(Value)) {
587 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
588 return 0;
589 }
590
591 Value >>= 1; // Low bit is not encoded.
592
593 uint64_t out = 0;
594 out |= (Value & 0x80000) << 7; // S bit
595 out |= (Value & 0x40000) >> 7; // J2 bit
596 out |= (Value & 0x20000) >> 4; // J1 bit
597 out |= (Value & 0x1F800) << 5; // imm6 field
598 out |= (Value & 0x007FF); // imm11 field
599
600 return swapHalfWords(out, Endian == support::little);
601 }
602 case ARM::fixup_arm_thumb_bl: {
603 if (!isInt<25>(Value - 4) ||
604 (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
605 !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
606 !STI->getFeatureBits()[ARM::HasV6MOps] &&
607 !isInt<23>(Value - 4))) {
608 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
609 return 0;
610 }
611
612 // The value doesn't encode the low bit (always zero) and is offset by
613 // four. The 32-bit immediate value is encoded as
614 // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
615 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
616 // The value is encoded into disjoint bit positions in the destination
617 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
618 // J = either J1 or J2 bit
619 //
620 // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
621 //
622 // Note that the halfwords are stored high first, low second; so we need
623 // to transpose the fixup value here to map properly.
624 uint32_t offset = (Value - 4) >> 1;
625 uint32_t signBit = (offset & 0x800000) >> 23;
626 uint32_t I1Bit = (offset & 0x400000) >> 22;
627 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
628 uint32_t I2Bit = (offset & 0x200000) >> 21;
629 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
630 uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
631 uint32_t imm11Bits = (offset & 0x000007FF);
632
633 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
634 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
635 (uint16_t)imm11Bits);
636 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
637 }
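
A worked instance of the BL formula in the comment above, with a hypothetical
Value of 0x1000 (not from the trace): offset = (0x1000 - 4) >> 1 = 0x7FE, so
S = I1 = I2 = 0 and therefore J1 = J2 = 1, imm10 = 0 and imm11 = 0x7FE; that
gives FirstHalf = 0x0000 and SecondHalf = (1 << 13) | (1 << 11) | 0x7FE =
0x2FFE, which joinHalfWords() packs as 0x2FFE0000 on a little-endian target.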
638 case ARM::fixup_arm_thumb_blx: {
639 // The value doesn't encode the low two bits (always zero) and is offset by
640 // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
641 // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
642 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
643 // The value is encoded into disjoint bit positions in the destination
644 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
645 // J = either J1 or J2 bit, 0 = zero.
646 //
647 // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
648 //
649 // Note that the halfwords are stored high first, low second; so we need
650 // to transpose the fixup value here to map properly.
651 if (Value % 4 != 0) {
652 Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
653 return 0;
654 }
655
656 uint32_t offset = (Value - 4) >> 2;
657 if (const MCSymbolRefExpr *SRE =
658 dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
659 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
660 offset = 0;
661 uint32_t signBit = (offset & 0x400000) >> 22;
662 uint32_t I1Bit = (offset & 0x200000) >> 21;
663 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
664 uint32_t I2Bit = (offset & 0x100000) >> 20;
665 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
666 uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
667 uint32_t imm10LBits = (offset & 0x3FF);
668
669 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
670 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
671 ((uint16_t)imm10LBits) << 1);
672 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
673 }
674 case ARM::fixup_thumb_adr_pcrel_10:
675 case ARM::fixup_arm_thumb_cp:
676 // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
677 // could have an error on our hands.
678 assert(STI != nullptr)(static_cast<void> (0));
679 if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
680 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
681 if (FixupDiagnostic) {
682 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
683 return 0;
684 }
685 }
686 // Offset by 4, and don't encode the low two bits.
687 return ((Value - 4) >> 2) & 0xff;
688 case ARM::fixup_arm_thumb_cb: {
689 // CB instructions can only branch to offsets in [4, 126] in multiples of 2
690 // so ensure that the raw value LSB is zero and it lies in [2, 130].
691 // An offset of 2 will be relaxed to a NOP.
692 if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
693 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
694 return 0;
695 }
696 // Offset by 4 and don't encode the lower bit, which is always 0.
697 // FIXME: diagnose if no Thumb2
698 uint32_t Binary = (Value - 4) >> 1;
699 return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
700 }
701 case ARM::fixup_arm_thumb_br:
702 // Offset by 4 and don't encode the lower bit, which is always 0.
703 assert(STI != nullptr)(static_cast<void> (0));
704 if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
705 !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
706 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
707 if (FixupDiagnostic) {
708 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
709 return 0;
710 }
711 }
712 return ((Value - 4) >> 1) & 0x7ff;
713 case ARM::fixup_arm_thumb_bcc:
714 // Offset by 4 and don't encode the lower bit, which is always 0.
715 assert(STI != nullptr)(static_cast<void> (0));
716 if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
717 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
718 if (FixupDiagnostic) {
719 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
720 return 0;
721 }
722 }
723 return ((Value - 4) >> 1) & 0xff;
724 case ARM::fixup_arm_pcrel_10_unscaled: {
725 Value = Value - 8; // ARM fixups offset by an additional word and don't
726 // need to adjust for the half-word ordering.
727 bool isAdd = true;
728 if ((int64_t)Value < 0) {
729 Value = -Value;
730 isAdd = false;
731 }
732 // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
733 if (Value >= 256) {
734 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
735 return 0;
736 }
737 Value = (Value & 0xf) | ((Value & 0xf0) << 4);
738 return Value | (isAdd << 23);
739 }
740 case ARM::fixup_arm_pcrel_10:
741 Value = Value - 4; // ARM fixups offset by an additional word and don't
742 // need to adjust for the half-word ordering.
743 LLVM_FALLTHROUGH[[gnu::fallthrough]];
744 case ARM::fixup_t2_pcrel_10: {
745 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
746 Value = Value - 4;
747 bool isAdd = true;
748 if ((int64_t)Value < 0) {
749 Value = -Value;
750 isAdd = false;
751 }
752 // These values don't encode the low two bits since they're always zero.
753 Value >>= 2;
754 if (Value >= 256) {
755 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
756 return 0;
757 }
758 Value |= isAdd << 23;
759
760 // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
761 // swapped.
762 if (Kind == ARM::fixup_t2_pcrel_10)
763 return swapHalfWords(Value, Endian == support::little);
764
765 return Value;
766 }
767 case ARM::fixup_arm_pcrel_9:
768 Value = Value - 4; // ARM fixups offset by an additional word and don't
769 // need to adjust for the half-word ordering.
770 LLVM_FALLTHROUGH[[gnu::fallthrough]];
771 case ARM::fixup_t2_pcrel_9: {
772 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
773 Value = Value - 4;
774 bool isAdd = true;
775 if ((int64_t)Value < 0) {
776 Value = -Value;
777 isAdd = false;
778 }
779 // These values don't encode the low bit since it's always zero.
780 if (Value & 1) {
781 Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
782 return 0;
783 }
784 Value >>= 1;
785 if (Value >= 256) {
786 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
787 return 0;
788 }
789 Value |= isAdd << 23;
790
791 // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
792 // swapped.
793 if (Kind == ARM::fixup_t2_pcrel_9)
794 return swapHalfWords(Value, Endian == support::little);
795
796 return Value;
797 }
798 case ARM::fixup_arm_mod_imm:
799 Value = ARM_AM::getSOImmVal(Value);
[4] Calling 'getSOImmVal'
800 if (Value >> 12) {
801 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
802 return 0;
803 }
804 return Value;
805 case ARM::fixup_t2_so_imm: {
806 Value = ARM_AM::getT2SOImmVal(Value);
807 if ((int64_t)Value < 0) {
808 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
809 return 0;
810 }
811 // Value will contain a 12-bit value broken up into a 4-bit shift in bits
812 // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
813 // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
814 // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
815 // half-word.
816 uint64_t EncValue = 0;
817 EncValue |= (Value & 0x800) << 15;
818 EncValue |= (Value & 0x700) << 4;
819 EncValue |= (Value & 0xff);
820 return swapHalfWords(EncValue, Endian == support::little);
821 }
822 case ARM::fixup_bf_branch: {
823 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
824 if (FixupDiagnostic) {
825 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
826 return 0;
827 }
828 uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
829 return swapHalfWords(out, Endian == support::little);
830 }
831 case ARM::fixup_bf_target:
832 case ARM::fixup_bfl_target:
833 case ARM::fixup_bfc_target: {
834 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
835 if (FixupDiagnostic) {
836 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
837 return 0;
838 }
839 uint32_t out = 0;
840 uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
841 Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
842 out |= (((Value - 4) >> 1) & 0x1) << 11;
843 out |= (((Value - 4) >> 1) & 0x7fe);
844 out |= (((Value - 4) >> 1) & HighBitMask) << 5;
845 return swapHalfWords(out, Endian == support::little);
846 }
847 case ARM::fixup_bfcsel_else_target: {
848 // If this is a fixup of a branch future's else target then it should be a
849 // constant MCExpr representing the distance between the branch target
850 // and the instruction after that same branch.
851 Value = Target.getConstant();
852
853 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
854 if (FixupDiagnostic) {
855 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
856 return 0;
857 }
858 uint32_t out = ((Value >> 2) & 1) << 17;
859 return swapHalfWords(out, Endian == support::little);
860 }
861 case ARM::fixup_wls:
862 case ARM::fixup_le: {
863 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
864 if (FixupDiagnostic) {
865 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
866 return 0;
867 }
868 uint64_t real_value = Value - 4;
869 uint32_t out = 0;
870 if (Kind == ARM::fixup_le)
871 real_value = -real_value;
872 out |= ((real_value >> 1) & 0x1) << 11;
873 out |= ((real_value >> 1) & 0x7fe);
874 return swapHalfWords(out, Endian == support::little);
875 }
876 }
877}
878
879bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
880 const MCFixup &Fixup,
881 const MCValue &Target) {
882 const MCSymbolRefExpr *A = Target.getSymA();
883 const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
884 const unsigned FixupKind = Fixup.getKind();
885 if (FixupKind >= FirstLiteralRelocationKind)
886 return true;
887 if (FixupKind == ARM::fixup_arm_thumb_bl) {
888 assert(Sym && "How did we resolve this?")(static_cast<void> (0));
889
890 // If the symbol is external the linker will handle it.
891 // FIXME: Should we handle it as an optimization?
892
893 // If the symbol is out of range, produce a relocation and hope the
894 // linker can handle it. GNU AS produces an error in this case.
895 if (Sym->isExternal())
896 return true;
897 }
898 // Create relocations for unconditional branches to function symbols with
899 // different execution mode in ELF binaries.
900 if (Sym && Sym->isELF()) {
901 unsigned Type = cast<MCSymbolELF>(Sym)->getType();
902 if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
903 if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
904 return true;
905 if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
906 FixupKind == ARM::fixup_arm_thumb_bl ||
907 FixupKind == ARM::fixup_t2_condbranch ||
908 FixupKind == ARM::fixup_t2_uncondbranch))
909 return true;
910 }
911 }
912 // We must always generate a relocation for BL/BLX instructions if we have
913 // a symbol to reference, as the linker relies on knowing the destination
914 // symbol's thumb-ness to get interworking right.
915 if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
916 FixupKind == ARM::fixup_arm_blx ||
917 FixupKind == ARM::fixup_arm_uncondbl ||
918 FixupKind == ARM::fixup_arm_condbl))
919 return true;
920 return false;
921}
922
923/// getFixupKindNumBytes - The number of bytes the fixup may change.
924static unsigned getFixupKindNumBytes(unsigned Kind) {
925 switch (Kind) {
926 default:
927 llvm_unreachable("Unknown fixup kind!")__builtin_unreachable();
928
929 case FK_Data_1:
930 case ARM::fixup_arm_thumb_bcc:
931 case ARM::fixup_arm_thumb_cp:
932 case ARM::fixup_thumb_adr_pcrel_10:
933 return 1;
934
935 case FK_Data_2:
936 case ARM::fixup_arm_thumb_br:
937 case ARM::fixup_arm_thumb_cb:
938 case ARM::fixup_arm_mod_imm:
939 return 2;
940
941 case ARM::fixup_arm_pcrel_10_unscaled:
942 case ARM::fixup_arm_ldst_pcrel_12:
943 case ARM::fixup_arm_pcrel_10:
944 case ARM::fixup_arm_pcrel_9:
945 case ARM::fixup_arm_ldst_abs_12:
946 case ARM::fixup_arm_adr_pcrel_12:
947 case ARM::fixup_arm_uncondbl:
948 case ARM::fixup_arm_condbl:
949 case ARM::fixup_arm_blx:
950 case ARM::fixup_arm_condbranch:
951 case ARM::fixup_arm_uncondbranch:
952 return 3;
953
954 case FK_Data_4:
955 case ARM::fixup_t2_ldst_pcrel_12:
956 case ARM::fixup_t2_condbranch:
957 case ARM::fixup_t2_uncondbranch:
958 case ARM::fixup_t2_pcrel_10:
959 case ARM::fixup_t2_pcrel_9:
960 case ARM::fixup_t2_adr_pcrel_12:
961 case ARM::fixup_arm_thumb_bl:
962 case ARM::fixup_arm_thumb_blx:
963 case ARM::fixup_arm_movt_hi16:
964 case ARM::fixup_arm_movw_lo16:
965 case ARM::fixup_t2_movt_hi16:
966 case ARM::fixup_t2_movw_lo16:
967 case ARM::fixup_t2_so_imm:
968 case ARM::fixup_bf_branch:
969 case ARM::fixup_bf_target:
970 case ARM::fixup_bfl_target:
971 case ARM::fixup_bfc_target:
972 case ARM::fixup_bfcsel_else_target:
973 case ARM::fixup_wls:
974 case ARM::fixup_le:
975 return 4;
976
977 case FK_SecRel_2:
978 return 2;
979 case FK_SecRel_4:
980 return 4;
981 }
982}
983
984/// getFixupKindContainerSizeBytes - The number of bytes of the
985/// container involved in big endian.
986static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
987 switch (Kind) {
988 default:
989 llvm_unreachable("Unknown fixup kind!")__builtin_unreachable();
990
991 case FK_Data_1:
992 return 1;
993 case FK_Data_2:
994 return 2;
995 case FK_Data_4:
996 return 4;
997
998 case ARM::fixup_arm_thumb_bcc:
999 case ARM::fixup_arm_thumb_cp:
1000 case ARM::fixup_thumb_adr_pcrel_10:
1001 case ARM::fixup_arm_thumb_br:
1002 case ARM::fixup_arm_thumb_cb:
1003 // Instruction size is 2 bytes.
1004 return 2;
1005
1006 case ARM::fixup_arm_pcrel_10_unscaled:
1007 case ARM::fixup_arm_ldst_pcrel_12:
1008 case ARM::fixup_arm_pcrel_10:
1009 case ARM::fixup_arm_pcrel_9:
1010 case ARM::fixup_arm_adr_pcrel_12:
1011 case ARM::fixup_arm_uncondbl:
1012 case ARM::fixup_arm_condbl:
1013 case ARM::fixup_arm_blx:
1014 case ARM::fixup_arm_condbranch:
1015 case ARM::fixup_arm_uncondbranch:
1016 case ARM::fixup_t2_ldst_pcrel_12:
1017 case ARM::fixup_t2_condbranch:
1018 case ARM::fixup_t2_uncondbranch:
1019 case ARM::fixup_t2_pcrel_10:
1020 case ARM::fixup_t2_pcrel_9:
1021 case ARM::fixup_t2_adr_pcrel_12:
1022 case ARM::fixup_arm_thumb_bl:
1023 case ARM::fixup_arm_thumb_blx:
1024 case ARM::fixup_arm_movt_hi16:
1025 case ARM::fixup_arm_movw_lo16:
1026 case ARM::fixup_t2_movt_hi16:
1027 case ARM::fixup_t2_movw_lo16:
1028 case ARM::fixup_arm_mod_imm:
1029 case ARM::fixup_t2_so_imm:
1030 case ARM::fixup_bf_branch:
1031 case ARM::fixup_bf_target:
1032 case ARM::fixup_bfl_target:
1033 case ARM::fixup_bfc_target:
1034 case ARM::fixup_bfcsel_else_target:
1035 case ARM::fixup_wls:
1036 case ARM::fixup_le:
1037 // Instruction size is 4 bytes.
1038 return 4;
1039 }
1040}
1041
1042void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
1043 const MCValue &Target,
1044 MutableArrayRef<char> Data, uint64_t Value,
1045 bool IsResolved,
1046 const MCSubtargetInfo* STI) const {
1047 unsigned Kind = Fixup.getKind();
1048 if (Kind >= FirstLiteralRelocationKind)
1049 return;
1050 unsigned NumBytes = getFixupKindNumBytes(Kind);
1051 MCContext &Ctx = Asm.getContext();
1052 Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
1053 if (!Value)
1054 return; // Doesn't change encoding.
1055
1056 unsigned Offset = Fixup.getOffset();
1057 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!")(static_cast<void> (0));
1058
1059 // Used to point to big endian bytes.
1060 unsigned FullSizeBytes;
1061 if (Endian == support::big) {
1062 FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
1063 assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!")(static_cast<void> (0));
1064 assert(NumBytes <= FullSizeBytes && "Invalid fixup size!")(static_cast<void> (0));
1065 }
1066
1067 // For each byte of the fragment that the fixup touches, mask in the bits from
1068 // the fixup value. The Value has been "split up" into the appropriate
1069 // bitfields above.
1070 for (unsigned i = 0; i != NumBytes; ++i) {
1071 unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
1072 Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
1073 }
1074}
1075
1076namespace CU {
1077
1078/// Compact unwind encoding values.
1079enum CompactUnwindEncodings {
1080 UNWIND_ARM_MODE_MASK = 0x0F000000,
1081 UNWIND_ARM_MODE_FRAME = 0x01000000,
1082 UNWIND_ARM_MODE_FRAME_D = 0x02000000,
1083 UNWIND_ARM_MODE_DWARF = 0x04000000,
1084
1085 UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,
1086
1087 UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
1088 UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
1089 UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,
1090
1091 UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
1092 UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
1093 UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
1094 UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
1095 UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,
1096
1097 UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,
1098
1099 UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
1100};
1101
1102} // end CU namespace
1103
1104/// Generate compact unwind encoding for the function based on the CFI
1105/// instructions. If the CFI instructions describe a frame that cannot be
1106/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
1108 // tells the runtime to fall back and unwind using DWARF.
1108uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
1109 ArrayRef<MCCFIInstruction> Instrs) const {
1110 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n")do { } while (false);
1111 // Only armv7k uses CFI based unwinding.
1112 if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
1113 return 0;
1114 // No .cfi directives means no frame.
1115 if (Instrs.empty())
1116 return 0;
1117 // Start off assuming CFA is at SP+0.
1118 unsigned CFARegister = ARM::SP;
1119 int CFARegisterOffset = 0;
1120 // Mark savable registers as initially unsaved
1121 DenseMap<unsigned, int> RegOffsets;
1122 int FloatRegCount = 0;
1123 // Process each .cfi directive and build up compact unwind info.
1124 for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
1125 unsigned Reg;
1126 const MCCFIInstruction &Inst = Instrs[i];
1127 switch (Inst.getOperation()) {
1128 case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
1129 CFARegisterOffset = Inst.getOffset();
1130 CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1131 break;
1132 case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
1133 CFARegisterOffset = Inst.getOffset();
1134 break;
1135 case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
1136 CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1137 break;
1138 case MCCFIInstruction::OpOffset: // DW_CFA_offset
1139 Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1140 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
1141 RegOffsets[Reg] = Inst.getOffset();
1142 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1143 RegOffsets[Reg] = Inst.getOffset();
1144 ++FloatRegCount;
1145 } else {
1146 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1147 llvm::dbgs() << ".cfi_offset on unknown register="do { } while (false)
1148 << Inst.getRegister() << "\n")do { } while (false);
1149 return CU::UNWIND_ARM_MODE_DWARF;
1150 }
1151 break;
1152 case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1153 // Ignore
1154 break;
1155 default:
1156 // Directive not convertible to compact unwind, bail out.
1157 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1158 llvm::dbgs()do { } while (false)
1159 << "CFI directive not compatiable with comact "do { } while (false)
1160 "unwind encoding, opcode=" << Inst.getOperation()do { } while (false)
1161 << "\n")do { } while (false);
1162 return CU::UNWIND_ARM_MODE_DWARF;
1163 break;
1164 }
1165 }
1166
1167 // If no frame set up, return no unwind info.
1168 if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1169 return 0;
1170
1171 // Verify standard frame (lr/r7) was used.
1172 if (CFARegister != ARM::R7) {
1173 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "do { } while (false)
1174 << CFARegisterdo { } while (false)
1175 << " instead of r7\n")do { } while (false);
1176 return CU::UNWIND_ARM_MODE_DWARF;
1177 }
1178 int StackAdjust = CFARegisterOffset - 8;
1179 if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1180 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1181 llvm::dbgs()do { } while (false)
1182 << "LR not saved as standard frame, StackAdjust="do { } while (false)
1183 << StackAdjustdo { } while (false)
1184 << ", CFARegisterOffset=" << CFARegisterOffsetdo { } while (false)
1185 << ", lr save at offset=" << RegOffsets[14] << "\n")do { } while (false);
1186 return CU::UNWIND_ARM_MODE_DWARF;
1187 }
1188 if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1189 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1190 llvm::dbgs() << "r7 not saved as standard frame\n")do { } while (false);
1191 return CU::UNWIND_ARM_MODE_DWARF;
1192 }
1193 uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1194
1195 // If var-args are used, there may be a stack adjust required.
1196 switch (StackAdjust) {
1197 case 0:
1198 break;
1199 case 4:
1200 CompactUnwindEncoding |= 0x00400000;
1201 break;
1202 case 8:
1203 CompactUnwindEncoding |= 0x00800000;
1204 break;
1205 case 12:
1206 CompactUnwindEncoding |= 0x00C00000;
1207 break;
1208 default:
1209 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()do { } while (false)
1210 << ".cfi_def_cfa stack adjust ("do { } while (false)
1211 << StackAdjust << ") out of range\n")do { } while (false);
1212 return CU::UNWIND_ARM_MODE_DWARF;
1213 }
1214
1215 // If r6 is saved, it must be right below r7.
1216 static struct {
1217 unsigned Reg;
1218 unsigned Encoding;
1219 } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1220 {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1221 {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1222 {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1223 {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1224 {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1225 {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1226 {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1227
1228 int CurOffset = -8 - StackAdjust;
1229 for (auto CSReg : GPRCSRegs) {
1230 auto Offset = RegOffsets.find(CSReg.Reg);
1231 if (Offset == RegOffsets.end())
1232 continue;
1233
1234 int RegOffset = Offset->second;
1235 if (RegOffset != CurOffset - 4) {
1236 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1237 llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "do { } while (false)
1238 << RegOffset << " but only supported at "do { } while (false)
1239 << CurOffset << "\n")do { } while (false);
1240 return CU::UNWIND_ARM_MODE_DWARF;
1241 }
1242 CompactUnwindEncoding |= CSReg.Encoding;
1243 CurOffset -= 4;
1244 }
1245
1246 // If no floats saved, we are done.
1247 if (FloatRegCount == 0)
1248 return CompactUnwindEncoding;
1249
1250 // Switch mode to include D register saving.
1251 CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1252 CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1253
1254 // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1255 // but needs coordination with the linker and libunwind.
1256 if (FloatRegCount > 4) {
1257 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1258 llvm::dbgs() << "unsupported number of D registers saved ("do { } while (false)
1259 << FloatRegCount << ")\n")do { } while (false);
1260 return CU::UNWIND_ARM_MODE_DWARF;
1261 }
1262
1263 // Floating point registers must either be saved sequentially, or we defer to
1264 // DWARF. No gaps allowed here so check that each saved d-register is
1265 // precisely where it should be.
1266 static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1267 for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1268 auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1269 if (Offset == RegOffsets.end()) {
1270 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1271 llvm::dbgs() << FloatRegCount << " D-regs saved, but "do { } while (false)
1272 << MRI.getName(FPRCSRegs[Idx])do { } while (false)
1273 << " not saved\n")do { } while (false);
1274 return CU::UNWIND_ARM_MODE_DWARF;
1275 } else if (Offset->second != CurOffset - 8) {
1276 DEBUG_WITH_TYPE("compact-unwind",do { } while (false)
1277 llvm::dbgs() << FloatRegCount << " D-regs saved, but "do { } while (false)
1278 << MRI.getName(FPRCSRegs[Idx])do { } while (false)
1279 << " saved at " << Offset->seconddo { } while (false)
1280 << ", expected at " << CurOffset - 8do { } while (false)
1281 << "\n")do { } while (false);
1282 return CU::UNWIND_ARM_MODE_DWARF;
1283 }
1284 CurOffset -= 8;
1285 }
1286
1287 return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1288}
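
As a concrete illustration of the encoding logic above (a hypothetical
standard armv7k frame, not taken from this translation unit): a prologue that
pushes r4-r7 and lr and makes r7 the frame pointer yields CFA = r7 + 8 with
lr at -4, r7 at -8, r6 at -12, r5 at -16 and r4 at -20. StackAdjust is then 0
and the GPRCSRegs walk accepts each register, producing

    CU::UNWIND_ARM_MODE_FRAME
      | CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6
      | CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5
      | CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4   // == 0x01000007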
1289
1290static MCAsmBackend *createARMAsmBackend(const Target &T,
1291 const MCSubtargetInfo &STI,
1292 const MCRegisterInfo &MRI,
1293 const MCTargetOptions &Options,
1294 support::endianness Endian) {
1295 const Triple &TheTriple = STI.getTargetTriple();
1296 switch (TheTriple.getObjectFormat()) {
1297 default:
1298 llvm_unreachable("unsupported object format")__builtin_unreachable();
1299 case Triple::MachO:
1300 return new ARMAsmBackendDarwin(T, STI, MRI);
1301 case Triple::COFF:
1302 assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported")(static_cast<void> (0));
1303 return new ARMAsmBackendWinCOFF(T, STI);
1304 case Triple::ELF:
1305 assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target")(static_cast<void> (0));
1306 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1307 return new ARMAsmBackendELF(T, STI, OSABI, Endian);
1308 }
1309}
1310
1311MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1312 const MCSubtargetInfo &STI,
1313 const MCRegisterInfo &MRI,
1314 const MCTargetOptions &Options) {
1315 return createARMAsmBackend(T, STI, MRI, Options, support::little);
1316}
1317
1318MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1319 const MCSubtargetInfo &STI,
1320 const MCRegisterInfo &MRI,
1321 const MCTargetOptions &Options) {
1322 return createARMAsmBackend(T, STI, MRI, Options, support::big);
1323}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h

1//===-- ARMAddressingModes.h - ARM Addressing Modes -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the ARM addressing mode implementation stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_ARM_MCTARGETDESC_ARMADDRESSINGMODES_H
14#define LLVM_LIB_TARGET_ARM_MCTARGETDESC_ARMADDRESSINGMODES_H
15
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/bit.h"
19#include "llvm/Support/ErrorHandling.h"
20#include "llvm/Support/MathExtras.h"
21#include <cassert>
22
23namespace llvm {
24
25/// ARM_AM - ARM Addressing Mode Stuff
26namespace ARM_AM {
27 enum ShiftOpc {
28 no_shift = 0,
29 asr,
30 lsl,
31 lsr,
32 ror,
33 rrx,
34 uxtw
35 };
36
37 enum AddrOpc {
38 sub = 0,
39 add
40 };
41
42 inline const char *getAddrOpcStr(AddrOpc Op) { return Op == sub ? "-" : ""; }
43
44 inline const char *getShiftOpcStr(ShiftOpc Op) {
45 switch (Op) {
46 default: llvm_unreachable("Unknown shift opc!")__builtin_unreachable();
47 case ARM_AM::asr: return "asr";
48 case ARM_AM::lsl: return "lsl";
49 case ARM_AM::lsr: return "lsr";
50 case ARM_AM::ror: return "ror";
51 case ARM_AM::rrx: return "rrx";
52 case ARM_AM::uxtw: return "uxtw";
53 }
54 }
55
56 inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
57 switch (Op) {
58 default: llvm_unreachable("Unknown shift opc!")__builtin_unreachable();
59 case ARM_AM::asr: return 2;
60 case ARM_AM::lsl: return 0;
61 case ARM_AM::lsr: return 1;
62 case ARM_AM::ror: return 3;
63 }
64 }
65
66 enum AMSubMode {
67 bad_am_submode = 0,
68 ia,
69 ib,
70 da,
71 db
72 };
73
74 inline const char *getAMSubModeStr(AMSubMode Mode) {
75 switch (Mode) {
76 default: llvm_unreachable("Unknown addressing sub-mode!")__builtin_unreachable();
77 case ARM_AM::ia: return "ia";
78 case ARM_AM::ib: return "ib";
79 case ARM_AM::da: return "da";
80 case ARM_AM::db: return "db";
81 }
82 }
83
84 /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
85 ///
86 inline unsigned rotr32(unsigned Val, unsigned Amt) {
87 assert(Amt < 32 && "Invalid rotate amount")(static_cast<void> (0));
88 return (Val >> Amt) | (Val << ((32-Amt)&31));
[12] The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int'
89 }
90
91 /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
92 ///
93 inline unsigned rotl32(unsigned Val, unsigned Amt) {
94 assert(Amt < 32 && "Invalid rotate amount")(static_cast<void> (0));
95 return (Val << Amt) | (Val >> ((32-Amt)&31));
96 }
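
Note that both rotates guard Amt only with an assert, and this build defines
NDEBUG (see -D NDEBUG in the command line at the top of this report), so the
assertion compiles away and the Amt == 32 value traced above reaches the
undefined shift. A sketch of a rotate that is defined for every amount (an
illustrative rewrite, not the upstream fix):

    inline unsigned rotr32(unsigned Val, unsigned Amt) {
      Amt &= 31;  // Amt == 32 now behaves as 0, so both shifts stay in range
      return (Val >> Amt) | (Val << ((32 - Amt) & 31));
    }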
97
98 //===--------------------------------------------------------------------===//
99 // Addressing Mode #1: shift_operand with registers
100 //===--------------------------------------------------------------------===//
101 //
102 // This 'addressing mode' is used for arithmetic instructions. It can
103 // represent things like:
104 // reg
105 // reg [asr|lsl|lsr|ror|rrx] reg
106 // reg [asr|lsl|lsr|ror|rrx] imm
107 //
108 // This is stored as three operands [rega, regb, opc]. The first is the base
109 // reg, the second is the shift-amount register (reg0 if the amount is an
110 // immediate or absent). The third encodes the shift opcode and the imm if a reg isn't present.
111 //
112 inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
113 return ShOp | (Imm << 3);
114 }
115 inline unsigned getSORegOffset(unsigned Op) { return Op >> 3; }
116 inline ShiftOpc getSORegShOp(unsigned Op) { return (ShiftOpc)(Op & 7); }
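
A small usage sketch (hypothetical call site; the values follow from the bit layout above):

    unsigned Opc = llvm::ARM_AM::getSORegOpc(llvm::ARM_AM::lsl, 4); // 2 | (4 << 3) == 0x22
    unsigned Amt = llvm::ARM_AM::getSORegOffset(Opc);               // == 4
    llvm::ARM_AM::ShiftOpc Op = llvm::ARM_AM::getSORegShOp(Opc);    // == lsl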
117
118 /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
119 /// the 8-bit imm value.
120 inline unsigned getSOImmValImm(unsigned Imm) { return Imm & 0xFF; }
121 /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
122 /// the rotate amount.
123 inline unsigned getSOImmValRot(unsigned Imm) { return (Imm >> 8) * 2; }
124
125 /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
126 /// computing the rotate amount to use. If this immediate value cannot be
127 /// handled with a single shifter-op, determine a good rotate amount that will
128 /// take a maximal chunk of bits out of the immediate.
129 inline unsigned getSOImmValRotate(unsigned Imm) {
130 // 8-bit (or less) immediates are trivially shifter_operands with a rotate
131 // of zero.
132 if ((Imm & ~255U) == 0) return 0;
(8) Taking false branch
133
134 // Use CTZ to compute the rotate amount.
135 unsigned TZ = countTrailingZeros(Imm);
136
137 // Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
138 // not 9.
139 unsigned RotAmt = TZ & ~1;
(9) 'RotAmt' initialized to 32
140
141 // If we can handle this spread, return it.
142 if ((rotr32(Imm, RotAmt) & ~255U) == 0)
(10) Passing the value 32 via 2nd parameter 'Amt'
(11) Calling 'rotr32'
143 return (32-RotAmt)&31; // HW rotates right, not left.
144
145 // For values like 0xF000000F, we should ignore the low 6 bits, then
146 // retry the hunt.
147 if (Imm & 63U) {
148 unsigned TZ2 = countTrailingZeros(Imm & ~63U);
149 unsigned RotAmt2 = TZ2 & ~1;
150 if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
151 return (32-RotAmt2)&31; // HW rotates right, not left.
152 }
153
154 // Otherwise, we have no way to cover this span of bits with a single
155 // shifter_op immediate. Return a chunk of bits that will be useful to
156 // handle.
157 return (32-RotAmt)&31; // HW rotates right, not left.
158 }
159
160 /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
161 /// into a shifter_operand immediate operand, return the 12-bit encoding for
162 /// it. If not, return -1.
163 inline int getSOImmVal(unsigned Arg) {
164 // 8-bit (or less) immediates are trivially shifter_operands with a rotate
165 // of zero.
166 if ((Arg & ~255U) == 0) return Arg;
(5) Assuming the condition is false
(6) Taking false branch
167
168 unsigned RotAmt = getSOImmValRotate(Arg);
(7) Calling 'getSOImmValRotate'
169
170 // If this cannot be handled with a single shifter_op, bail out.
171 if (rotr32(~255U, RotAmt) & Arg)
172 return -1;
173
174 // Encode this correctly.
175 return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
176 }
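
A concrete round trip (a sketch; the values follow from the definitions above). 0x3FC is 0xFF rotated right by 30 bits, so it is encodable:

    int Enc = llvm::ARM_AM::getSOImmVal(0x3FC);        // == 0xFFF
    unsigned Imm8 = llvm::ARM_AM::getSOImmValImm(Enc); // == 0xFF
    unsigned Rot = llvm::ARM_AM::getSOImmValRot(Enc);  // == 30
    // rotr32(Imm8, Rot) reproduces the original 0x3FC.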
177
178 /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
179 /// or'ing together two SOImmVal's.
180 inline bool isSOImmTwoPartVal(unsigned V) {
181 // If this can be handled with a single shifter_op, bail out.
182 V = rotr32(~255U, getSOImmValRotate(V)) & V;
183 if (V == 0)
184 return false;
185
186 // If this can be handled with two shifter_op's, accept.
187 V = rotr32(~255U, getSOImmValRotate(V)) & V;
188 return V == 0;
189 }
190
191 /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
192 /// return the first chunk of it.
193 inline unsigned getSOImmTwoPartFirst(unsigned V) {
194 return rotr32(255U, getSOImmValRotate(V)) & V;
195 }
196
197 /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
198 /// return the second chunk of it.
199 inline unsigned getSOImmTwoPartSecond(unsigned V) {
200 // Mask out the first hunk.
201 V = rotr32(~255U, getSOImmValRotate(V)) & V;
202
203 // Take what's left.
204 assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
205 return V;
206 }
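
For example (a sketch; values checked against the definitions above), 0xFF0000FF cannot be encoded as one shifter-operand immediate but splits cleanly:

    llvm::ARM_AM::isSOImmTwoPartVal(0xFF0000FF);     // true
    llvm::ARM_AM::getSOImmTwoPartFirst(0xFF0000FF);  // == 0x000000FF
    llvm::ARM_AM::getSOImmTwoPartSecond(0xFF0000FF); // == 0xFF000000
    // OR-ing the two parts reassembles the original value.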
207
208 /// isSOImmTwoPartValNeg - Return true if the specified value can be obtained
209 /// by two SOImmVals such that -V = First + Second.
210 /// "R+V" can be optimized to (sub (sub R, First), Second).
211 /// "R=V" can be optimized to (sub (mvn R, ~(-First)), Second).
212 inline bool isSOImmTwoPartValNeg(unsigned V) {
213 unsigned First;
214 if (!isSOImmTwoPartVal(-V))
215 return false;
216 // Return false if ~(-First) is not a SoImmval.
217 First = getSOImmTwoPartFirst(-V);
218 First = ~(-First);
219 return !(rotr32(~255U, getSOImmValRotate(First)) & First);
220 }
221
222 /// getThumbImmValShift - Try to handle Imm with an 8-bit immediate followed
223 /// by a left shift. Returns the shift amount to use.
224 inline unsigned getThumbImmValShift(unsigned Imm) {
225 // 8-bit (or less) immediates are trivially immediate operands with a shift
226 // of zero.
227 if ((Imm & ~255U) == 0) return 0;
228
229 // Use CTZ to compute the shift amount.
230 return countTrailingZeros(Imm);
231 }
232
233 /// isThumbImmShiftedVal - Return true if the specified value can be obtained
234 /// by left shifting an 8-bit immediate.
235 inline bool isThumbImmShiftedVal(unsigned V) {
236 // If V can be produced by left-shifting an 8-bit immediate, masking that form out leaves zero.
237 V = (~255U << getThumbImmValShift(V)) & V;
238 return V == 0;
239 }
240
241 /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed
242 /// by a left shift. Returns the shift amount to use.
243 inline unsigned getThumbImm16ValShift(unsigned Imm) {
244 // 16-bit (or less) immediates are trivially immediate operands with a shift
245 // of zero.
246 if ((Imm & ~65535U) == 0) return 0;
247
248 // Use CTZ to compute the shift amount.
249 return countTrailingZeros(Imm);
250 }
251
252 /// isThumbImm16ShiftedVal - Return true if the specified value can be
253 /// obtained by left shifting a 16-bit immediate.
254 inline bool isThumbImm16ShiftedVal(unsigned V) {
255 // If V can be produced by left-shifting a 16-bit immediate, masking that form out leaves zero.
256 V = (~65535U << getThumbImm16ValShift(V)) & V;
257 return V == 0;
258 }
259
260 /// getThumbImmNonShiftedVal - If V is a value that satisfies
261 /// isThumbImmShiftedVal, return the non-shifted value.
262 inline unsigned getThumbImmNonShiftedVal(unsigned V) {
263 return V >> getThumbImmValShift(V);
264 }
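
A usage sketch (hypothetical call site): 0x4400 is 0x11 shifted left by 10, so it qualifies:

    llvm::ARM_AM::isThumbImmShiftedVal(0x4400);     // true
    llvm::ARM_AM::getThumbImmValShift(0x4400);      // == 10
    llvm::ARM_AM::getThumbImmNonShiftedVal(0x4400); // == 0x11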
265
266
267 /// getT2SOImmValSplat - Return the 12-bit encoded representation
268 /// if the specified value can be obtained by splatting the low 8 bits
269 /// into every other byte or every byte of a 32-bit value. i.e.,
270 /// 00000000 00000000 00000000 abcdefgh control = 0
271 /// 00000000 abcdefgh 00000000 abcdefgh control = 1
272 /// abcdefgh 00000000 abcdefgh 00000000 control = 2
273 /// abcdefgh abcdefgh abcdefgh abcdefgh control = 3
274 /// Return -1 if none of the above apply.
275 /// See ARM Reference Manual A6.3.2.
276 inline int getT2SOImmValSplatVal(unsigned V) {
277 unsigned u, Vs, Imm;
278 // control = 0
279 if ((V & 0xffffff00) == 0)
280 return V;
281
282 // If the value is zeroes in the first byte, just shift those off
283 Vs = ((V & 0xff) == 0) ? V >> 8 : V;
284 // Any passing value only has 8 bits of payload, splatted across the word
285 Imm = Vs & 0xff;
286 // Likewise, any passing values have the payload splatted into the 3rd byte
287 u = Imm | (Imm << 16);
288
289 // control = 1 or 2
290 if (Vs == u)
291 return (((Vs == V) ? 1 : 2) << 8) | Imm;
292
293 // control = 3
294 if (Vs == (u | (u << 8)))
295 return (3 << 8) | Imm;
296
297 return -1;
298 }
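
A sketch of the four control patterns for Imm8 = 0xAB (the results follow from the logic above):

    llvm::ARM_AM::getT2SOImmValSplatVal(0x000000AB); // == 0x0AB (control = 0)
    llvm::ARM_AM::getT2SOImmValSplatVal(0x00AB00AB); // == 0x1AB (control = 1)
    llvm::ARM_AM::getT2SOImmValSplatVal(0xAB00AB00); // == 0x2AB (control = 2)
    llvm::ARM_AM::getT2SOImmValSplatVal(0xABABABAB); // == 0x3AB (control = 3)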
299
300 /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
301 /// specified value is a rotated 8-bit value. Return -1 if no rotation
302 /// encoding is possible.
303 /// See ARM Reference Manual A6.3.2.
304 inline int getT2SOImmValRotateVal(unsigned V) {
305 unsigned RotAmt = countLeadingZeros(V);
306 if (RotAmt >= 24)
307 return -1;
308
309 // If 'Arg' can be handled with a single shifter_op return the value.
310 if ((rotr32(0xff000000U, RotAmt) & V) == V)
311 return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);
312
313 return -1;
314 }
315
316 /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
317 /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
318 /// encoding for it. If not, return -1.
319 /// See ARM Reference Manual A6.3.2.
320 inline int getT2SOImmVal(unsigned Arg) {
321 // If 'Arg' is an 8-bit splat, then get the encoded value.
322 int Splat = getT2SOImmValSplatVal(Arg);
323 if (Splat != -1)
324 return Splat;
325
326 // If 'Arg' can be handled with a single shifter_op return the value.
327 int Rot = getT2SOImmValRotateVal(Arg);
328 if (Rot != -1)
329 return Rot;
330
331 return -1;
332 }
333
334 inline unsigned getT2SOImmValRotate(unsigned V) {
335 if ((V & ~255U) == 0) return 0;
336 // Use CTZ to compute the rotate amount.
337 unsigned RotAmt = countTrailingZeros(V);
338 return (32 - RotAmt) & 31;
339 }
340
341 inline bool isT2SOImmTwoPartVal(unsigned Imm) {
342 unsigned V = Imm;
343 // Passing values can be any combination of splat values and shifter
344 // values. If this can be handled with a single shifter or splat, bail
345 // out. Those should be handled directly, not with a two-part val.
346 if (getT2SOImmValSplatVal(V) != -1)
347 return false;
348 V = rotr32(~255U, getT2SOImmValRotate(V)) & V;
349 if (V == 0)
350 return false;
351
352 // If this can be handled as an immediate, accept.
353 if (getT2SOImmVal(V) != -1) return true;
354
355 // Likewise, try masking out a splat value first.
356 V = Imm;
357 if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
358 V &= ~0xff00ff00U;
359 else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
360 V &= ~0x00ff00ffU;
361 // If what's left can be handled as an immediate, accept.
362 if (getT2SOImmVal(V) != -1) return true;
363
364 // Otherwise, do not accept.
365 return false;
366 }
367
368 inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
369 assert(isT2SOImmTwoPartVal(Imm) &&
370 "Immediate cannot be encoded as two part immediate!");
371 // Try a shifter operand as one part
372 unsigned V = rotr32(~255U, getT2SOImmValRotate(Imm)) & Imm;
373 // If the rest is encodable as an immediate, then return it.
374 if (getT2SOImmVal(V) != -1) return V;
375
376 // Try masking out a splat value first.
377 if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
378 return Imm & 0xff00ff00U;
379
380 // The other splat is all that's left as an option.
381 assert(getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
382 return Imm & 0x00ff00ffU;
383 }
384
385 inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
386 // Mask out the first hunk
387 Imm ^= getT2SOImmTwoPartFirst(Imm);
388 // Return what's left
389 assert(getT2SOImmVal(Imm) != -1 &&
390 "Unable to encode second part of T2 two part SO immediate");
391 return Imm;
392 }
393
394
395 //===--------------------------------------------------------------------===//
396 // Addressing Mode #2
397 //===--------------------------------------------------------------------===//
398 //
399 // This is used for most simple load/store instructions.
400 //
401 // addrmode2 := reg +/- reg shop imm
402 // addrmode2 := reg +/- imm12
403 //
404 // The first operand is always a Reg. The second operand is a reg if in
405 // reg/reg form, otherwise it's reg#0. The third field encodes the operation
406 // in bit 12, the immediate in bits 0-11, and the shift op in 13-15. The
407 // fourth operand 16-17 encodes the index mode.
408 //
409 // If this addressing mode is a frame index (before prolog/epilog insertion
410 // and code rewriting), this operand will have the form: FI#, reg0, <offs>
411 // with no shift amount for the frame offset.
412 //
413 inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO,
414 unsigned IdxMode = 0) {
415 assert(Imm12 < (1 << 12) && "Imm too large!");
416 bool isSub = Opc == sub;
417 return Imm12 | ((int)isSub << 12) | (SO << 13) | (IdxMode << 16);
418 }
419 inline unsigned getAM2Offset(unsigned AM2Opc) {
420 return AM2Opc & ((1 << 12)-1);
421 }
422 inline AddrOpc getAM2Op(unsigned AM2Opc) {
423 return ((AM2Opc >> 12) & 1) ? sub : add;
424 }
425 inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
426 return (ShiftOpc)((AM2Opc >> 13) & 7);
427 }
428 inline unsigned getAM2IdxMode(unsigned AM2Opc) { return (AM2Opc >> 16); }
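
An encode/decode round trip implied by this layout (hypothetical call site):

    unsigned AM2 = llvm::ARM_AM::getAM2Opc(llvm::ARM_AM::sub, 100, llvm::ARM_AM::lsl);
    llvm::ARM_AM::getAM2Offset(AM2);   // == 100
    llvm::ARM_AM::getAM2Op(AM2);       // == sub
    llvm::ARM_AM::getAM2ShiftOpc(AM2); // == lsl
    llvm::ARM_AM::getAM2IdxMode(AM2);  // == 0 (default)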
429
430 //===--------------------------------------------------------------------===//
431 // Addressing Mode #3
432 //===--------------------------------------------------------------------===//
433 //
434 // This is used for sign-extending loads, and load/store-pair instructions.
435 //
436 // addrmode3 := reg +/- reg
437 // addrmode3 := reg +/- imm8
438 //
439 // The first operand is always a Reg. The second operand is a reg if in
440 // reg/reg form, otherwise it's reg#0. The third field encodes the operation
441 // in bit 8, the immediate in bits 0-7. The fourth operand 9-10 encodes the
442 // index mode.
443
444 /// getAM3Opc - This function encodes the addrmode3 opc field.
445 inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset,
446 unsigned IdxMode = 0) {
447 bool isSub = Opc == sub;
448 return ((int)isSub << 8) | Offset | (IdxMode << 9);
449 }
450 inline unsigned char getAM3Offset(unsigned AM3Opc) { return AM3Opc & 0xFF; }
451 inline AddrOpc getAM3Op(unsigned AM3Opc) {
452 return ((AM3Opc >> 8) & 1) ? sub : add;
453 }
454 inline unsigned getAM3IdxMode(unsigned AM3Opc) { return (AM3Opc >> 9); }
455
456 //===--------------------------------------------------------------------===//
457 // Addressing Mode #4
458 //===--------------------------------------------------------------------===//
459 //
460 // This is used for load / store multiple instructions.
461 //
462 // addrmode4 := reg, <mode>
463 //
464 // The four modes are:
465 // IA - Increment after
466 // IB - Increment before
467 // DA - Decrement after
468 // DB - Decrement before
469 // For VFP instructions, only the IA and DB modes are valid.
470
471 inline AMSubMode getAM4SubMode(unsigned Mode) {
472 return (AMSubMode)(Mode & 0x7);
473 }
474
475 inline unsigned getAM4ModeImm(AMSubMode SubMode) { return (int)SubMode; }
476
477 //===--------------------------------------------------------------------===//
478 // Addressing Mode #5
479 //===--------------------------------------------------------------------===//
480 //
481 // This is used for coprocessor instructions, such as FP load/stores.
482 //
483 // addrmode5 := reg +/- imm8*4
484 //
485 // The first operand is always a Reg. The second operand encodes the
486 // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
487
488 /// getAM5Opc - This function encodes the addrmode5 opc field.
489 inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
490 bool isSub = Opc == sub;
491 return ((int)isSub << 8) | Offset;
492 }
493 inline unsigned char getAM5Offset(unsigned AM5Opc) { return AM5Opc & 0xFF; }
494 inline AddrOpc getAM5Op(unsigned AM5Opc) {
495 return ((AM5Opc >> 8) & 1) ? sub : add;
496 }
497
498 //===--------------------------------------------------------------------===//
499 // Addressing Mode #5 FP16
500 //===--------------------------------------------------------------------===//
501 //
502 // This is used for coprocessor instructions, such as 16-bit FP load/stores.
503 //
504 // addrmode5fp16 := reg +/- imm8*2
505 //
506 // The first operand is always a Reg. The second operand encodes the
507 // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
508
509 /// getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
510 inline unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset) {
511 bool isSub = Opc == sub;
512 return ((int)isSub << 8) | Offset;
513 }
514 inline unsigned char getAM5FP16Offset(unsigned AM5Opc) {
515 return AM5Opc & 0xFF;
516 }
517 inline AddrOpc getAM5FP16Op(unsigned AM5Opc) {
518 return ((AM5Opc >> 8) & 1) ? sub : add;
519 }
520
521 //===--------------------------------------------------------------------===//
522 // Addressing Mode #6
523 //===--------------------------------------------------------------------===//
524 //
525 // This is used for NEON load / store instructions.
526 //
527 // addrmode6 := reg with optional alignment
528 //
529 // This is stored in two operands [regaddr, align]. The first is the
530 // address register. The second operand is the value of the alignment
531 // specifier in bytes or zero if no explicit alignment.
532 // Valid alignments depend on the specific instruction.
533
534 //===--------------------------------------------------------------------===//
535 // NEON/MVE Modified Immediates
536 //===--------------------------------------------------------------------===//
537 //
538 // Several NEON and MVE instructions (e.g., VMOV) take a "modified immediate"
539 // vector operand, where a small immediate encoded in the instruction
540 // specifies a full NEON vector value. These modified immediates are
541 // represented here as encoded integers. The low 8 bits hold the immediate
542 // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold
543 // the "Cmode" field of the instruction. The interfaces below treat the
544 // Op and Cmode values as a single 5-bit value.
545
546 inline unsigned createVMOVModImm(unsigned OpCmode, unsigned Val) {
547 return (OpCmode << 8) | Val;
548 }
549 inline unsigned getVMOVModImmOpCmode(unsigned ModImm) {
550 return (ModImm >> 8) & 0x1f;
551 }
552 inline unsigned getVMOVModImmVal(unsigned ModImm) { return ModImm & 0xff; }
553
554 /// decodeVMOVModImm - Decode a NEON/MVE modified immediate value into the
555 /// element value and the element size in bits. (If the element size is
556 /// smaller than the vector, it is splatted into all the elements.)
557 inline uint64_t decodeVMOVModImm(unsigned ModImm, unsigned &EltBits) {
558 unsigned OpCmode = getVMOVModImmOpCmode(ModImm);
559 unsigned Imm8 = getVMOVModImmVal(ModImm);
560 uint64_t Val = 0;
561
562 if (OpCmode == 0xe) {
563 // 8-bit vector elements
564 Val = Imm8;
565 EltBits = 8;
566 } else if ((OpCmode & 0xc) == 0x8) {
567 // 16-bit vector elements
568 unsigned ByteNum = (OpCmode & 0x6) >> 1;
569 Val = Imm8 << (8 * ByteNum);
570 EltBits = 16;
571 } else if ((OpCmode & 0x8) == 0) {
572 // 32-bit vector elements, zero with one byte set
573 unsigned ByteNum = (OpCmode & 0x6) >> 1;
574 Val = Imm8 << (8 * ByteNum);
575 EltBits = 32;
576 } else if ((OpCmode & 0xe) == 0xc) {
577 // 32-bit vector elements, one byte with low bits set
578 unsigned ByteNum = 1 + (OpCmode & 0x1);
579 Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
580 EltBits = 32;
581 } else if (OpCmode == 0x1e) {
582 // 64-bit vector elements
583 for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
584 if ((ModImm >> ByteNum) & 1)
585 Val |= (uint64_t)0xff << (8 * ByteNum);
586 }
587 EltBits = 64;
588 } else {
589 llvm_unreachable("Unsupported VMOV immediate");
590 }
591 return Val;
592 }
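
A decode sketch (hypothetical values): OpCmode 0xa selects 16-bit elements with Imm8 placed in the high byte:

    unsigned EltBits;
    unsigned ModImm = llvm::ARM_AM::createVMOVModImm(0xa, 0x42); // == 0xA42
    uint64_t Val = llvm::ARM_AM::decodeVMOVModImm(ModImm, EltBits);
    // Val == 0x4200 and EltBits == 16, i.e. 0x4200 splatted into each i16 lane.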
593
594 // Generic validation for single-byte immediate (0X00, 00X0, etc).
595 inline bool isNEONBytesplat(unsigned Value, unsigned Size) {
596 assert(Size >= 1 && Size <= 4 && "Invalid size");
597 unsigned count = 0;
598 for (unsigned i = 0; i < Size; ++i) {
599 if (Value & 0xff) count++;
600 Value >>= 8;
601 }
602 return count == 1;
603 }
604
605 /// Checks if Value is a correct immediate for instructions like VBIC/VORR.
606 inline bool isNEONi16splat(unsigned Value) {
607 if (Value > 0xffff)
608 return false;
609 // i16 value with set bits only in one byte X0 or 0X.
610 return Value == 0 || isNEONBytesplat(Value, 2);
611 }
612
613 // Encode a NEON 16-bit splat immediate for instructions like VBIC/VORR
614 inline unsigned encodeNEONi16splat(unsigned Value) {
615 assert(isNEONi16splat(Value) && "Invalid NEON splat value");
616 if (Value >= 0x100)
617 Value = (Value >> 8) | 0xa00;
618 else
619 Value |= 0x800;
620 return Value;
621 }
622
623 /// Checks if Value is a correct immediate for instructions like VBIC/VORR.
624 inline bool isNEONi32splat(unsigned Value) {
625 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
626 return Value == 0 || isNEONBytesplat(Value, 4);
627 }
628
629 /// Encode a NEON 32-bit splat immediate for instructions like VBIC/VORR.
630 inline unsigned encodeNEONi32splat(unsigned Value) {
631 assert(isNEONi32splat(Value) && "Invalid NEON splat value");
632 if (Value >= 0x100 && Value <= 0xff00)
633 Value = (Value >> 8) | 0x200;
634 else if (Value > 0xffff && Value <= 0xff0000)
635 Value = (Value >> 16) | 0x400;
636 else if (Value > 0xffffff)
637 Value = (Value >> 24) | 0x600;
638 return Value;
639 }
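
Encoding sketches for an i16 splat operand (hypothetical values):

    llvm::ARM_AM::encodeNEONi16splat(0x0042); // == 0x842 (value in the low byte)
    llvm::ARM_AM::encodeNEONi16splat(0x4200); // == 0xA42 (value in the high byte)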
640
641 //===--------------------------------------------------------------------===//
642 // Floating-point Immediates
643 //
644 inline float getFPImmFloat(unsigned Imm) {
645 // We expect an 8-bit binary encoding of a floating-point number here.
646
647 uint8_t Sign = (Imm >> 7) & 0x1;
648 uint8_t Exp = (Imm >> 4) & 0x7;
649 uint8_t Mantissa = Imm & 0xf;
650
651 // 8-bit FP IEEE Float Encoding
652 // abcd efgh aBbbbbbc defgh000 00000000 00000000
653 //
654 // where B = NOT(b);
655 uint32_t I = 0;
656 I |= Sign << 31;
657 I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
658 I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
659 I |= (Exp & 0x3) << 23;
660 I |= Mantissa << 19;
661 return bit_cast<float>(I);
662 }
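
Two decode sketches (the results follow from the bit assembly above):

    llvm::ARM_AM::getFPImmFloat(0x70); // == 1.0f (sign 0, exp field 7, mantissa 0)
    llvm::ARM_AM::getFPImmFloat(0x00); // == 2.0f (exp field 0 decodes to 2^1)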
663
664 /// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
665 /// floating-point value. If the value cannot be represented as an 8-bit
666 /// floating-point value, then return -1.
667 inline int getFP16Imm(const APInt &Imm) {
668 uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
669 int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
670 int64_t Mantissa = Imm.getZExtValue() & 0x3ff; // 10 bits
671
672 // We can handle 4 bits of mantissa.
673 // mantissa = (16+UInt(e:f:g:h))/16.
674 if (Mantissa & 0x3f)
675 return -1;
676 Mantissa >>= 6;
677
678 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
679 if (Exp < -3 || Exp > 4)
680 return -1;
681 Exp = ((Exp+3) & 0x7) ^ 4;
682
683 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
684 }
685
686 inline int getFP16Imm(const APFloat &FPImm) {
687 return getFP16Imm(FPImm.bitcastToAPInt());
688 }
689
690 /// If this is a FP16Imm encoded as a fp32 value, return the 8-bit encoding
691 /// for it. Otherwise return -1 like getFP16Imm.
692 inline int getFP32FP16Imm(const APInt &Imm) {
693 if (Imm.getActiveBits() > 16)
694 return -1;
695 return ARM_AM::getFP16Imm(Imm.trunc(16));
696 }
697
698 inline int getFP32FP16Imm(const APFloat &FPImm) {
699 return getFP32FP16Imm(FPImm.bitcastToAPInt());
700 }
701
702 /// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
703 /// floating-point value. If the value cannot be represented as an 8-bit
704 /// floating-point value, then return -1.
705 inline int getFP32Imm(const APInt &Imm) {
706 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
707 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
708 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
709
710 // We can handle 4 bits of mantissa.
711 // mantissa = (16+UInt(e:f:g:h))/16.
712 if (Mantissa & 0x7ffff)
713 return -1;
714 Mantissa >>= 19;
715 if ((Mantissa & 0xf) != Mantissa)
716 return -1;
717
718 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
719 if (Exp < -3 || Exp > 4)
720 return -1;
721 Exp = ((Exp+3) & 0x7) ^ 4;
722
723 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
724 }
725
726 inline int getFP32Imm(const APFloat &FPImm) {
727 return getFP32Imm(FPImm.bitcastToAPInt());
728 }
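
A round trip through the 8-bit form (a sketch; assumes the APFloat constructors shown):

    int Enc = llvm::ARM_AM::getFP32Imm(llvm::APFloat(1.0f)); // == 0x70
    llvm::ARM_AM::getFPImmFloat(Enc);                        // == 1.0f again
    llvm::ARM_AM::getFP32Imm(llvm::APFloat(1.5f));           // == 0x78
    llvm::ARM_AM::getFP32Imm(llvm::APFloat(0.1f));           // == -1 (not representable)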
729
730 /// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
731 /// floating-point value. If the value cannot be represented as an 8-bit
732 /// floating-point value, then return -1.
733 inline int getFP64Imm(const APInt &Imm) {
734 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
735 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
736 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;
737
738 // We can handle 4 bits of mantissa.
739 // mantissa = (16+UInt(e:f:g:h))/16.
740 if (Mantissa & 0xffffffffffffULL)
741 return -1;
742 Mantissa >>= 48;
743 if ((Mantissa & 0xf) != Mantissa)
744 return -1;
745
746 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
747 if (Exp < -3 || Exp > 4)
748 return -1;
749 Exp = ((Exp+3) & 0x7) ^ 4;
750
751 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
752 }
753
754 inline int getFP64Imm(const APFloat &FPImm) {
755 return getFP64Imm(FPImm.bitcastToAPInt());
756 }
757
758} // end namespace ARM_AM
759} // end namespace llvm
760
761#endif
762