// X86AsmBackend.cpp — extracted from the LLVM 14.0.0git doxygen listing.
1 //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
11 #include "llvm/ADT/StringSwitch.h"
12 #include "llvm/BinaryFormat/ELF.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCAsmLayout.h"
16 #include "llvm/MC/MCAssembler.h"
17 #include "llvm/MC/MCCodeEmitter.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCDwarf.h"
21 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/MC/MCInstrInfo.h"
27 #include "llvm/MC/MCObjectWriter.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCSectionMachO.h"
31 #include "llvm/MC/MCValue.h"
32 #include "llvm/MC/TargetRegistry.h"
36 
37 using namespace llvm;
38 
39 namespace {
40 /// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
41 class X86AlignBranchKind {
42 private:
43  uint8_t AlignBranchKind = 0;
44 
45 public:
46  void operator=(const std::string &Val) {
47  if (Val.empty())
48  return;
49  SmallVector<StringRef, 6> BranchTypes;
50  StringRef(Val).split(BranchTypes, '+', -1, false);
51  for (auto BranchType : BranchTypes) {
52  if (BranchType == "fused")
53  addKind(X86::AlignBranchFused);
54  else if (BranchType == "jcc")
55  addKind(X86::AlignBranchJcc);
56  else if (BranchType == "jmp")
57  addKind(X86::AlignBranchJmp);
58  else if (BranchType == "call")
59  addKind(X86::AlignBranchCall);
60  else if (BranchType == "ret")
61  addKind(X86::AlignBranchRet);
62  else if (BranchType == "indirect")
63  addKind(X86::AlignBranchIndirect);
64  else {
65  errs() << "invalid argument " << BranchType.str()
66  << " to -x86-align-branch=; each element must be one of: fused, "
67  "jcc, jmp, call, ret, indirect.(plus separated)\n";
68  }
69  }
70  }
71 
72  operator uint8_t() const { return AlignBranchKind; }
73  void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
74 };
75 
// Storage for the parsed -x86-align-branch mask; written via cl::location.
X86AlignBranchKind X86AlignBranchKindLoc;

// Byte boundary that aligned branches must not cross or end against.
// 0 (the default) disables branch alignment entirely.
cl::opt<unsigned> X86AlignBranchBoundary(
    "x86-align-branch-boundary", cl::init(0),
    cl::desc(
        "Control how the assembler should align branches with NOP. If the "
        "boundary's size is not 0, it should be a power of 2 and no less "
        "than 32. Branches will be aligned to prevent from being across or "
        "against the boundary of specified size. The default value 0 does not "
        "align branches."));

// Which kinds of branches to align; parsed into X86AlignBranchKindLoc above.
// NOTE(review): the cl::opt declaration line opening this option is not
// visible in this view; the argument list below is kept as-is.
    "x86-align-branch",
    cl::desc(
        "Specify types of branches to align (plus separated list of types):"
        "\njcc indicates conditional jumps"
        "\nfused indicates fused conditional jumps"
        "\njmp indicates direct unconditional jumps"
        "\ncall indicates direct and indirect calls"
        "\nret indicates rets"
        "\nindirect indicates indirect unconditional jumps"),
    cl::location(X86AlignBranchKindLoc));

// Convenience master switch: turns on 32-byte alignment for a default set of
// branch kinds (see the X86AsmBackend constructor for the exact set).
cl::opt<bool> X86AlignBranchWithin32BBoundaries(
    "x86-branches-within-32B-boundaries", cl::init(false),
    cl::desc(
        "Align selected instructions to mitigate negative performance impact "
        "of Intel's micro code update for errata skx102. May break "
        "assumptions about labels corresponding to particular instructions, "
        "and should be used with caution."));

// Upper bound on how many redundant prefixes may be added to one instruction
// when padding via prefixes (0 disables prefix padding).
cl::opt<unsigned> X86PadMaxPrefixSize(
    "x86-pad-max-prefix-size", cl::init(0),
    cl::desc("Maximum number of prefixes to use for padding"));

// Allow growing previous instructions to satisfy .align directives.
cl::opt<bool> X86PadForAlign(
    "x86-pad-for-align", cl::init(false), cl::Hidden,
    cl::desc("Pad previous instructions to implement align directives"));

// Allow growing previous instructions to satisfy branch alignment.
cl::opt<bool> X86PadForBranchAlign(
    "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
    cl::desc("Pad previous instructions to implement branch alignment"))
;
118 
// Thin ELF target-writer wrapper; all behavior lives in the base class.
class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  // NOTE(review): the trailing 'foobar' parameter is never used and is not
  // forwarded to the base class — confirm whether it can be removed at the
  // call sites and then dropped here.
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
      : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};
125 
126 class X86AsmBackend : public MCAsmBackend {
127  const MCSubtargetInfo &STI;
128  std::unique_ptr<const MCInstrInfo> MCII;
129  X86AlignBranchKind AlignBranchType;
130  Align AlignBoundary;
131  unsigned TargetPrefixMax = 0;
132 
133  MCInst PrevInst;
134  MCBoundaryAlignFragment *PendingBA = nullptr;
135  std::pair<MCFragment *, size_t> PrevInstPosition;
136  bool CanPadInst;
137 
138  uint8_t determinePaddingPrefix(const MCInst &Inst) const;
139  bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
140  bool needAlign(const MCInst &Inst) const;
141  bool canPadBranches(MCObjectStreamer &OS) const;
142  bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
143 
144 public:
145  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
146  : MCAsmBackend(support::little), STI(STI),
147  MCII(T.createMCInstrInfo()) {
148  if (X86AlignBranchWithin32BBoundaries) {
149  // At the moment, this defaults to aligning fused branches, unconditional
150  // jumps, and (unfused) conditional jumps with nops. Both the
151  // instructions aligned and the alignment method (nop vs prefix) may
152  // change in the future.
153  AlignBoundary = assumeAligned(32);;
154  AlignBranchType.addKind(X86::AlignBranchFused);
155  AlignBranchType.addKind(X86::AlignBranchJcc);
156  AlignBranchType.addKind(X86::AlignBranchJmp);
157  }
158  // Allow overriding defaults set by master flag
159  if (X86AlignBranchBoundary.getNumOccurrences())
160  AlignBoundary = assumeAligned(X86AlignBranchBoundary);
161  if (X86AlignBranch.getNumOccurrences())
162  AlignBranchType = X86AlignBranchKindLoc;
163  if (X86PadMaxPrefixSize.getNumOccurrences())
164  TargetPrefixMax = X86PadMaxPrefixSize;
165  }
166 
167  bool allowAutoPadding() const override;
168  bool allowEnhancedRelaxation() const override;
169  void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
170  const MCSubtargetInfo &STI) override;
171  void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) override;
172 
173  unsigned getNumFixupKinds() const override {
175  }
176 
177  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
178 
179  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
180 
181  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
182  const MCValue &Target) override;
183 
184  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
186  uint64_t Value, bool IsResolved,
187  const MCSubtargetInfo *STI) const override;
188 
189  bool mayNeedRelaxation(const MCInst &Inst,
190  const MCSubtargetInfo &STI) const override;
191 
192  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
193  const MCRelaxableFragment *DF,
194  const MCAsmLayout &Layout) const override;
195 
196  void relaxInstruction(MCInst &Inst,
197  const MCSubtargetInfo &STI) const override;
198 
199  bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
200  MCCodeEmitter &Emitter,
201  unsigned &RemainingSize) const;
202 
203  bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
204  unsigned &RemainingSize) const;
205 
206  bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
207  unsigned &RemainingSize) const;
208 
209  void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const override;
210 
211  unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
212 
213  bool writeNopData(raw_ostream &OS, uint64_t Count,
214  const MCSubtargetInfo *STI) const override;
215 };
216 } // end anonymous namespace
217 
218 static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode) {
219  unsigned Op = Inst.getOpcode();
220  switch (Op) {
221  default:
222  return Op;
223  case X86::JCC_1:
224  return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
225  case X86::JMP_1:
226  return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
227  }
228 }
229 
/// Map an arithmetic/compare/push opcode taking a sign-extended 8-bit
/// immediate to the equivalent form with a full-width immediate. Opcodes with
/// no relaxed form are returned unchanged. Note the 64-bit forms relax to the
/// ri32/mi32 variants (32-bit immediate, sign-extended at runtime).
static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    // Not a relaxable arithmetic instruction.
    return Op;

  // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

  // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

  // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

  // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

  // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

  // ADC
  case X86::ADC16ri8: return X86::ADC16ri;
  case X86::ADC16mi8: return X86::ADC16mi;
  case X86::ADC32ri8: return X86::ADC32ri;
  case X86::ADC32mi8: return X86::ADC32mi;
  case X86::ADC64ri8: return X86::ADC64ri32;
  case X86::ADC64mi8: return X86::ADC64mi32;

  // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

  // SBB
  case X86::SBB16ri8: return X86::SBB16ri;
  case X86::SBB16mi8: return X86::SBB16mi;
  case X86::SBB32ri8: return X86::SBB32ri;
  case X86::SBB32mi8: return X86::SBB32mi;
  case X86::SBB64ri8: return X86::SBB64ri32;
  case X86::SBB64mi8: return X86::SBB64mi32;

  // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

  // PUSH
  case X86::PUSH32i8: return X86::PUSHi32;
  case X86::PUSH16i8: return X86::PUSHi16;
  case X86::PUSH64i8: return X86::PUSH64i32;
  }
}
314 
315 static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode) {
316  unsigned R = getRelaxedOpcodeArith(Inst);
317  if (R != Inst.getOpcode())
318  return R;
319  return getRelaxedOpcodeBranch(Inst, Is16BitMode);
320 }
321 
323  const MCInstrInfo &MCII) {
324  unsigned Opcode = MI.getOpcode();
325  switch (Opcode) {
326  default:
327  return X86::COND_INVALID;
328  case X86::JCC_1: {
329  const MCInstrDesc &Desc = MCII.get(Opcode);
330  return static_cast<X86::CondCode>(
331  MI.getOperand(Desc.getNumOperands() - 1).getImm());
332  }
333  }
334 }
335 
338  X86::CondCode CC = getCondFromBranch(MI, MCII);
340 }
341 
342 /// Check if the instruction uses RIP relative addressing.
343 static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
344  unsigned Opcode = MI.getOpcode();
345  const MCInstrDesc &Desc = MCII.get(Opcode);
346  uint64_t TSFlags = Desc.TSFlags;
347  unsigned CurOp = X86II::getOperandBias(Desc);
348  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
349  if (MemoryOperand < 0)
350  return false;
351  unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
352  unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
353  return (BaseReg == X86::RIP);
354 }
355 
356 /// Check if the instruction is a prefix.
357 static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII) {
358  return X86II::isPrefix(MCII.get(MI.getOpcode()).TSFlags);
359 }
360 
361 /// Check if the instruction is valid as the first instruction in macro fusion.
362 static bool isFirstMacroFusibleInst(const MCInst &Inst,
363  const MCInstrInfo &MCII) {
364  // An Intel instruction with RIP relative addressing is not macro fusible.
365  if (isRIPRelative(Inst, MCII))
366  return false;
370 }
371 
372 /// X86 can reduce the bytes of NOP by padding instructions with prefixes to
373 /// get a better peformance in some cases. Here, we determine which prefix is
374 /// the most suitable.
375 ///
376 /// If the instruction has a segment override prefix, use the existing one.
377 /// If the target is 64-bit, use the CS.
378 /// If the target is 32-bit,
379 /// - If the instruction has a ESP/EBP base register, use SS.
380 /// - Otherwise use DS.
381 uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
382  assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
383  "Prefixes can be added only in 32-bit or 64-bit mode.");
384  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
385  uint64_t TSFlags = Desc.TSFlags;
386 
387  // Determine where the memory operand starts, if present.
388  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
389  if (MemoryOperand != -1)
390  MemoryOperand += X86II::getOperandBias(Desc);
391 
392  unsigned SegmentReg = 0;
393  if (MemoryOperand >= 0) {
394  // Check for explicit segment override on memory operand.
395  SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
396  }
397 
398  switch (TSFlags & X86II::FormMask) {
399  default:
400  break;
401  case X86II::RawFrmDstSrc: {
402  // Check segment override opcode prefix as needed (not for %ds).
403  if (Inst.getOperand(2).getReg() != X86::DS)
404  SegmentReg = Inst.getOperand(2).getReg();
405  break;
406  }
407  case X86II::RawFrmSrc: {
408  // Check segment override opcode prefix as needed (not for %ds).
409  if (Inst.getOperand(1).getReg() != X86::DS)
410  SegmentReg = Inst.getOperand(1).getReg();
411  break;
412  }
413  case X86II::RawFrmMemOffs: {
414  // Check segment override opcode prefix as needed.
415  SegmentReg = Inst.getOperand(1).getReg();
416  break;
417  }
418  }
419 
420  if (SegmentReg != 0)
421  return X86::getSegmentOverridePrefixForReg(SegmentReg);
422 
423  if (STI.hasFeature(X86::Mode64Bit))
424  return X86::CS_Encoding;
425 
426  if (MemoryOperand >= 0) {
427  unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
428  unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
429  if (BaseReg == X86::ESP || BaseReg == X86::EBP)
430  return X86::SS_Encoding;
431  }
432  return X86::DS_Encoding;
433 }
434 
435 /// Check if the two instructions will be macro-fused on the target cpu.
436 bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
437  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
438  if (!InstDesc.isConditionalBranch())
439  return false;
440  if (!isFirstMacroFusibleInst(Cmp, *MCII))
441  return false;
442  const X86::FirstMacroFusionInstKind CmpKind =
444  const X86::SecondMacroFusionInstKind BranchKind =
446  return X86::isMacroFused(CmpKind, BranchKind);
447 }
448 
449 /// Check if the instruction has a variant symbol operand.
450 static bool hasVariantSymbol(const MCInst &MI) {
451  for (auto &Operand : MI) {
452  if (!Operand.isExpr())
453  continue;
454  const MCExpr &Expr = *Operand.getExpr();
455  if (Expr.getKind() == MCExpr::SymbolRef &&
456  cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
457  return true;
458  }
459  return false;
460 }
461 
462 bool X86AsmBackend::allowAutoPadding() const {
463  return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
464 }
465 
466 bool X86AsmBackend::allowEnhancedRelaxation() const {
467  return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
468 }
469 
470 /// X86 has certain instructions which enable interrupts exactly one
471 /// instruction *after* the instruction which stores to SS. Return true if the
472 /// given instruction has such an interrupt delay slot.
473 static bool hasInterruptDelaySlot(const MCInst &Inst) {
474  switch (Inst.getOpcode()) {
475  case X86::POPSS16:
476  case X86::POPSS32:
477  case X86::STI:
478  return true;
479 
480  case X86::MOV16sr:
481  case X86::MOV32sr:
482  case X86::MOV64sr:
483  case X86::MOV16sm:
484  if (Inst.getOperand(0).getReg() == X86::SS)
485  return true;
486  break;
487  }
488  return false;
489 }
490 
491 /// Check if the instruction to be emitted is right after any data.
492 static bool
493 isRightAfterData(MCFragment *CurrentFragment,
494  const std::pair<MCFragment *, size_t> &PrevInstPosition) {
495  MCFragment *F = CurrentFragment;
496  // Empty data fragments may be created to prevent further data being
497  // added into the previous fragment, we need to skip them since they
498  // have no contents.
499  for (; isa_and_nonnull<MCDataFragment>(F); F = F->getPrevNode())
500  if (cast<MCDataFragment>(F)->getContents().size() != 0)
501  break;
502 
503  // Since data is always emitted into a DataFragment, our check strategy is
504  // simple here.
505  // - If the fragment is a DataFragment
506  // - If it's not the fragment where the previous instruction is,
507  // returns true.
508  // - If it's the fragment holding the previous instruction but its
509  // size changed since the the previous instruction was emitted into
510  // it, returns true.
511  // - Otherwise returns false.
512  // - If the fragment is not a DataFragment, returns false.
513  if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
514  return DF != PrevInstPosition.first ||
515  DF->getContents().size() != PrevInstPosition.second;
516 
517  return false;
518 }
519 
520 /// \returns the fragment size if it has instructions, otherwise returns 0.
521 static size_t getSizeForInstFragment(const MCFragment *F) {
522  if (!F || !F->hasInstructions())
523  return 0;
524  // MCEncodedFragmentWithContents being templated makes this tricky.
525  switch (F->getKind()) {
526  default:
527  llvm_unreachable("Unknown fragment with instructions!");
528  case MCFragment::FT_Data:
529  return cast<MCDataFragment>(*F).getContents().size();
531  return cast<MCRelaxableFragment>(*F).getContents().size();
533  return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
534  }
535 }
536 
537 /// Return true if we can insert NOP or prefixes automatically before the
538 /// the instruction to be emitted.
539 bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
540  if (hasVariantSymbol(Inst))
541  // Linker may rewrite the instruction with variant symbol operand(e.g.
542  // TLSCALL).
543  return false;
544 
545  if (hasInterruptDelaySlot(PrevInst))
546  // If this instruction follows an interrupt enabling instruction with a one
547  // instruction delay, inserting a nop would change behavior.
548  return false;
549 
550  if (isPrefix(PrevInst, *MCII))
551  // If this instruction follows a prefix, inserting a nop/prefix would change
552  // semantic.
553  return false;
554 
555  if (isPrefix(Inst, *MCII))
556  // If this instruction is a prefix, inserting a prefix would change
557  // semantic.
558  return false;
559 
560  if (isRightAfterData(OS.getCurrentFragment(), PrevInstPosition))
561  // If this instruction follows any data, there is no clear
562  // instruction boundary, inserting a nop/prefix would change semantic.
563  return false;
564 
565  return true;
566 }
567 
568 bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
569  if (!OS.getAllowAutoPadding())
570  return false;
571  assert(allowAutoPadding() && "incorrect initialization!");
572 
573  // We only pad in text section.
574  if (!OS.getCurrentSectionOnly()->getKind().isText())
575  return false;
576 
577  // To be Done: Currently don't deal with Bundle cases.
578  if (OS.getAssembler().isBundlingEnabled())
579  return false;
580 
581  // Branches only need to be aligned in 32-bit or 64-bit mode.
582  if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
583  return false;
584 
585  return true;
586 }
587 
588 /// Check if the instruction operand needs to be aligned.
589 bool X86AsmBackend::needAlign(const MCInst &Inst) const {
590  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
591  return (Desc.isConditionalBranch() &&
592  (AlignBranchType & X86::AlignBranchJcc)) ||
593  (Desc.isUnconditionalBranch() &&
594  (AlignBranchType & X86::AlignBranchJmp)) ||
595  (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
596  (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
597  (Desc.isIndirectBranch() &&
598  (AlignBranchType & X86::AlignBranchIndirect));
599 }
600 
/// Insert BoundaryAlignFragment before instructions to align branches.
/// Called before each instruction is emitted; maintains CanPadInst and
/// PendingBA state consumed by emitInstructionEnd.
void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
                                         const MCInst &Inst, const MCSubtargetInfo &STI) {
  // Computed unconditionally: emitInstructionEnd records it on the fragment.
  CanPadInst = canPadInst(Inst, OS);

  if (!canPadBranches(OS))
    return;

  if (!isMacroFused(PrevInst, Inst))
    // Macro fusion doesn't happen indeed, clear the pending.
    PendingBA = nullptr;

  if (!CanPadInst)
    return;

  if (PendingBA && OS.getCurrentFragment()->getPrevNode() == PendingBA) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction.
    //
    // Do nothing here since we already inserted a BoudaryAlign fragment when
    // we met the first instruction in the fused pair and we'll tie them
    // together in emitInstructionEnd.
    //
    // Note: When there is at least one fragment, such as MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    // cmp %rax %rcx
    // .align 16
    // je .Label0
    // \ endcode
    //
    // We will treat the JCC as a unfused branch although it may be fused
    // with the CMP.
    return;
  }

  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
                          isFirstMacroFusibleInst(Inst, *MCII))) {
    // If we meet a unfused branch or the first instuction in a fusiable pair,
    // insert a BoundaryAlign fragment.
    OS.insert(PendingBA = new MCBoundaryAlignFragment(AlignBoundary, STI));
  }
}
645 
/// Set the last fragment to be aligned for the BoundaryAlignFragment.
/// Called after each instruction is emitted; records PrevInst/PrevInstPosition
/// for the next emitInstructionBegin and closes out any pending alignment.
void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) {
  PrevInst = Inst;
  MCFragment *CF = OS.getCurrentFragment();
  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));
  // Record the padding decision made in emitInstructionBegin on the fragment.
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(CanPadInst);

  if (!canPadBranches(OS))
    return;

  if (!needAlign(Inst) || !PendingBA)
    return;

  // Tie the aligned instructions into a pending BoundaryAlign.
  PendingBA->setLastFragment(CF);
  PendingBA = nullptr;

  // We need to ensure that further data isn't added to the current
  // DataFragment, so that we can get the size of instructions later in
  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
  // DataFragment.
  if (isa_and_nonnull<MCDataFragment>(CF))
    OS.insert(new MCDataFragment());

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  if (AlignBoundary.value() > Sec->getAlignment())
    Sec->setAlignment(AlignBoundary);
}
676 
677 Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
678  if (STI.getTargetTriple().isOSBinFormatELF()) {
679  unsigned Type;
680  if (STI.getTargetTriple().getArch() == Triple::x86_64) {
682 #define ELF_RELOC(X, Y) .Case(#X, Y)
683 #include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
684 #undef ELF_RELOC
685  .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
686  .Case("BFD_RELOC_8", ELF::R_X86_64_8)
687  .Case("BFD_RELOC_16", ELF::R_X86_64_16)
688  .Case("BFD_RELOC_32", ELF::R_X86_64_32)
689  .Case("BFD_RELOC_64", ELF::R_X86_64_64)
690  .Default(-1u);
691  } else {
693 #define ELF_RELOC(X, Y) .Case(#X, Y)
694 #include "llvm/BinaryFormat/ELFRelocs/i386.def"
695 #undef ELF_RELOC
696  .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
697  .Case("BFD_RELOC_8", ELF::R_386_8)
698  .Case("BFD_RELOC_16", ELF::R_386_16)
699  .Case("BFD_RELOC_32", ELF::R_386_32)
700  .Default(-1u);
701  }
702  if (Type == -1u)
703  return None;
704  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
705  }
707 }
708 
/// Describe the X86 target-specific fixup kinds; the table is indexed by
/// (Kind - FirstTargetFixupKind) and its order must match X86FixupKinds.h.
const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_signed_4byte", 0, 32, 0},
      {"reloc_signed_4byte_relax", 0, 32, 0},
      {"reloc_global_offset_table", 0, 32, 0},
      {"reloc_global_offset_table8", 0, 64, 0},
      {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
  };

  // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
  // do not require any extra processing.
  // NOTE(review): the early-return paths for literal-relocation kinds and for
  // generic (below FirstTargetFixupKind) kinds are not visible in this view —
  // confirm they delegate to MCAsmBackend::getFixupKindInfo.

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
  return Infos[Kind - FirstTargetFixupKind];
}
735 
736 bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
737  const MCFixup &Fixup,
738  const MCValue &) {
739  return Fixup.getKind() >= FirstLiteralRelocationKind;
740 }
741 
/// Number of bytes a fixup of the given kind patches in the output buffer.
static unsigned getFixupKindSize(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_NONE:
    return 0;
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_PCRel_4:
  // NOTE(review): the X86-specific 4-byte reloc_* fixup-kind case labels are
  // not visible in this view — confirm against upstream before relying on
  // this switch being exhaustive.
  case FK_SecRel_4:
  case FK_Data_4:
    return 4;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  }
}
775 
776 void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
777  const MCValue &Target,
779  uint64_t Value, bool IsResolved,
780  const MCSubtargetInfo *STI) const {
781  unsigned Kind = Fixup.getKind();
783  return;
784  unsigned Size = getFixupKindSize(Kind);
785 
786  assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
787 
788  int64_t SignedValue = static_cast<int64_t>(Value);
789  if ((Target.isAbsolute() || IsResolved) &&
790  getFixupKindInfo(Fixup.getKind()).Flags &
792  // check that PC relative fixup fits into the fixup size.
793  if (Size > 0 && !isIntN(Size * 8, SignedValue))
794  Asm.getContext().reportError(
795  Fixup.getLoc(), "value of " + Twine(SignedValue) +
796  " is too large for field of " + Twine(Size) +
797  ((Size == 1) ? " byte." : " bytes."));
798  } else {
799  // Check that uppper bits are either all zeros or all ones.
800  // Specifically ignore overflow/underflow as long as the leakage is
801  // limited to the lower bits. This is to remain compatible with
802  // other assemblers.
803  assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
804  "Value does not fit in the Fixup field");
805  }
806 
807  for (unsigned i = 0; i != Size; ++i)
808  Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
809 }
810 
811 bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
812  const MCSubtargetInfo &STI) const {
813  // Branches can always be relaxed in either mode.
814  if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
815  return true;
816 
817  // Check if this instruction is ever relaxable.
818  if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
819  return false;
820 
821 
822  // Check if the relaxable operand has an expression. For the current set of
823  // relaxable instructions, the relaxable operand is always the last operand.
824  unsigned RelaxableOp = Inst.getNumOperands() - 1;
825  if (Inst.getOperand(RelaxableOp).isExpr())
826  return true;
827 
828  return false;
829 }
830 
831 bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
832  uint64_t Value,
833  const MCRelaxableFragment *DF,
834  const MCAsmLayout &Layout) const {
835  // Relax if the value is too big for a (signed) i8.
836  return !isInt<8>(Value);
837 }
838 
839 // FIXME: Can tblgen help at all here to verify there aren't other instructions
840 // we can relax?
841 void X86AsmBackend::relaxInstruction(MCInst &Inst,
842  const MCSubtargetInfo &STI) const {
843  // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
844  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
845  unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
846 
847  if (RelaxedOp == Inst.getOpcode()) {
848  SmallString<256> Tmp;
849  raw_svector_ostream OS(Tmp);
850  Inst.dump_pretty(OS);
851  OS << "\n";
852  report_fatal_error("unexpected instruction to relax: " + OS.str());
853  }
854 
855  Inst.setOpcode(RelaxedOp);
856 }
857 
858 /// Return true if this instruction has been fully relaxed into it's most
859 /// general available form.
860 static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
861  auto &Inst = RF.getInst();
862  auto &STI = *RF.getSubtargetInfo();
863  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
864  return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
865 }
866 
/// Try to grow the encoding of RF's instruction by prepending redundant
/// prefix bytes, consuming up to RemainingSize bytes of padding.
/// \returns true if the fragment's contents were changed.
bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
                                            MCCodeEmitter &Emitter,
                                            unsigned &RemainingSize) const {
  if (!RF.getAllowAutoPadding())
    return false;
  // If the instruction isn't fully relaxed, shifting it around might require a
  // larger value for one of the fixups than can be encoded. The outer loop
  // will also catch this before moving to the next instruction, but we need to
  // prevent padding this single instruction as well.
  if (!isFullyRelaxed(RF))
    return false;

  const unsigned OldSize = RF.getContents().size();
  // An x86 instruction encoding is capped at 15 bytes; nothing can be added.
  if (OldSize == 15)
    return false;

  const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
  const unsigned RemainingPrefixSize = [&]() -> unsigned {
    raw_svector_ostream VecOS(Code);
    Emitter.emitPrefix(RF.getInst(), VecOS, STI);
    assert(Code.size() < 15 && "The number of prefixes must be less than 15.");

    // TODO: It turns out we need a decent amount of plumbing for the target
    // specific bits to determine number of prefixes its safe to add. Various
    // targets (older chips mostly, but also Atom family) encounter decoder
    // stalls with too many prefixes. For testing purposes, we set the value
    // externally for the moment.
    unsigned ExistingPrefixSize = Code.size();
    if (TargetPrefixMax <= ExistingPrefixSize)
      return 0;
    return TargetPrefixMax - ExistingPrefixSize;
  }();
  const unsigned PrefixBytesToAdd =
      std::min(MaxPossiblePad, RemainingPrefixSize);
  if (PrefixBytesToAdd == 0)
    return false;

  // Prefix byte chosen elsewhere to be innocuous for this instruction.
  const uint8_t Prefix = determinePaddingPrefix(RF.getInst());

  // Rebuild the contents: PrefixBytesToAdd copies of Prefix, then the
  // original encoding.
  Code.append(PrefixBytesToAdd, Prefix);
  Code.append(RF.getContents().begin(), RF.getContents().end());
  RF.getContents() = Code;

  // Adjust the fixups for the change in offsets
  for (auto &F : RF.getFixups()) {
    F.setOffset(F.getOffset() + PrefixBytesToAdd);
  }

  RemainingSize -= PrefixBytesToAdd;
  return true;
}
920 
/// Try to grow RF's instruction by relaxing it to its most general form,
/// consuming up to RemainingSize bytes of padding.
/// \returns true if the fragment was replaced by the relaxed encoding.
bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
                                                MCCodeEmitter &Emitter,
                                                unsigned &RemainingSize) const {
  if (isFullyRelaxed(RF))
    // TODO: There are lots of other tricks we could apply for increasing
    // encoding size without impacting performance.
    return false;

  MCInst Relaxed = RF.getInst();
  relaxInstruction(Relaxed, *RF.getSubtargetInfo());

  raw_svector_ostream VecOS(Code);
  Emitter.encodeInstruction(Relaxed, VecOS, Fixups, *RF.getSubtargetInfo());
  const unsigned OldSize = RF.getContents().size();
  const unsigned NewSize = Code.size();
  assert(NewSize >= OldSize && "size decrease during relaxation?");
  unsigned Delta = NewSize - OldSize;
  // Only commit the relaxed form if the growth fits the available padding.
  if (Delta > RemainingSize)
    return false;
  RF.setInst(Relaxed);
  RF.getContents() = Code;
  RF.getFixups() = Fixups;
  RemainingSize -= Delta;
  return true;
}
948 
949 bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
950  MCCodeEmitter &Emitter,
951  unsigned &RemainingSize) const {
952  bool Changed = false;
953  if (RemainingSize != 0)
954  Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
955  if (RemainingSize != 0)
956  Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
957  return Changed;
958 }
959 
/// Post-layout pass: grow instruction encodings preceding alignment
/// directives so fewer nop bytes are needed for padding.
void X86AsmBackend::finishLayout(MCAssembler const &Asm,
                                 MCAsmLayout &Layout) const {
  // See if we can further relax some instructions to cut down on the number of
  // nop bytes required for code alignment. The actual win is in reducing
  // instruction count, not number of bytes. Modern X86-64 can easily end up
  // decode limited. It is often better to reduce the number of instructions
  // (i.e. eliminate nops) even at the cost of increasing the size and
  // complexity of others.
  if (!X86PadForAlign && !X86PadForBranchAlign)
    return;

  // The processed regions are delimitered by LabeledFragments. -g may have more
  // MCSymbols and therefore different relaxation results. X86PadForAlign is
  // disabled by default to eliminate the -g vs non -g difference.
  DenseSet<MCFragment *> LabeledFragments;
  for (const MCSymbol &S : Asm.symbols())
    LabeledFragments.insert(S.getFragment(false));

  for (MCSection &Sec : Asm) {
    if (!Sec.getKind().isText())
      continue;

    for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
      MCFragment &F = *I;

      // A labeled fragment starts a new region: discard candidates gathered
      // so far so padding never crosses a label.
      if (LabeledFragments.count(&F))
        Relaxable.clear();

      if (F.getKind() == MCFragment::FT_Data ||
        // Skip and ignore
        continue;

      // Remember relaxable instructions as padding candidates for the next
      // align-type fragment we encounter.
      if (F.getKind() == MCFragment::FT_Relaxable) {
        auto &RF = cast<MCRelaxableFragment>(*I);
        Relaxable.push_back(&RF);
        continue;
      }

      auto canHandle = [](MCFragment &F) -> bool {
        switch (F.getKind()) {
        default:
          return false;
        case MCFragment::FT_Align:
          return X86PadForAlign;
          return X86PadForBranchAlign;
        }
      };
      // For any unhandled kind, assume we can't change layout.
      if (!canHandle(F)) {
        Relaxable.clear();
        continue;
      }

#ifndef NDEBUG
      const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
#endif
      const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);

      // To keep the effects local, prefer to relax instructions closest to
      // the align directive. This is purely about human understandability
      // of the resulting code. If we later find a reason to expand
      // particular instructions over others, we can adjust.
      MCFragment *FirstChangedFragment = nullptr;
      unsigned RemainingSize = OrigSize;
      while (!Relaxable.empty() && RemainingSize != 0) {
        auto &RF = *Relaxable.pop_back_val();
        // Give the backend a chance to play any tricks it wishes to increase
        // the encoding size of the given instruction. Target independent code
        // will try further relaxation, but target's may play further tricks.
        if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
          FirstChangedFragment = &RF;

        // If we have an instruction which hasn't been fully relaxed, we can't
        // skip past it and insert bytes before it. Changing its starting
        // offset might require a larger negative offset than it can encode.
        // We don't need to worry about larger positive offsets as none of the
        // possible offsets between this and our align are visible, and the
        // ones afterwards aren't changing.
        if (!isFullyRelaxed(RF))
          break;
      }
      Relaxable.clear();

      if (FirstChangedFragment) {
        // Make sure the offsets for any fragments in the affected range get
        // updated. Note that this (conservatively) invalidates the offsets of
        // those following, but this is not required.
        Layout.invalidateFragmentsFrom(FirstChangedFragment);
      }

      // BoundaryAlign explicitly tracks its size (unlike align)
      if (F.getKind() == MCFragment::FT_BoundaryAlign)
        cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);

#ifndef NDEBUG
      const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
      const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
      assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
             "can't move start of next fragment!");
      assert(FinalSize == RemainingSize && "inconsistent size computation?");
#endif

      // If we're looking at a boundary align, make sure we don't try to pad
      // its target instructions for some following directive. Doing so would
      // break the alignment of the current boundary align.
      if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
        const MCFragment *LastFragment = BF->getLastFragment();
        if (!LastFragment)
          continue;
        while (&*I != LastFragment)
          ++I;
      }
    }
  }

  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    Asm.computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
}
1085 
1086 unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
1087  if (STI.hasFeature(X86::Mode16Bit))
1088  return 4;
1089  if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Mode64Bit))
1090  return 1;
1091  if (STI.getFeatureBits()[X86::TuningFast7ByteNOP])
1092  return 7;
1093  if (STI.getFeatureBits()[X86::TuningFast15ByteNOP])
1094  return 15;
1095  if (STI.getFeatureBits()[X86::TuningFast11ByteNOP])
1096  return 11;
1097  // FIXME: handle 32-bit mode
1098  // 15-bytes is the longest single NOP instruction, but 10-bytes is
1099  // commonly the longest that can be efficiently decoded.
1100  return 10;
1101 }
1102 
1103 /// Write a sequence of optimal nops to the output, covering \p Count
1104 /// bytes.
1105 /// \return - true on success, false on failure
1106 bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
1107  const MCSubtargetInfo *STI) const {
1108  static const char Nops32Bit[10][11] = {
1109  // nop
1110  "\x90",
1111  // xchg %ax,%ax
1112  "\x66\x90",
1113  // nopl (%[re]ax)
1114  "\x0f\x1f\x00",
1115  // nopl 0(%[re]ax)
1116  "\x0f\x1f\x40\x00",
1117  // nopl 0(%[re]ax,%[re]ax,1)
1118  "\x0f\x1f\x44\x00\x00",
1119  // nopw 0(%[re]ax,%[re]ax,1)
1120  "\x66\x0f\x1f\x44\x00\x00",
1121  // nopl 0L(%[re]ax)
1122  "\x0f\x1f\x80\x00\x00\x00\x00",
1123  // nopl 0L(%[re]ax,%[re]ax,1)
1124  "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1125  // nopw 0L(%[re]ax,%[re]ax,1)
1126  "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1127  // nopw %cs:0L(%[re]ax,%[re]ax,1)
1128  "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1129  };
1130 
1131  // 16-bit mode uses different nop patterns than 32-bit.
1132  static const char Nops16Bit[4][11] = {
1133  // nop
1134  "\x90",
1135  // xchg %eax,%eax
1136  "\x66\x90",
1137  // lea 0(%si),%si
1138  "\x8d\x74\x00",
1139  // lea 0w(%si),%si
1140  "\x8d\xb4\x00\x00",
1141  };
1142 
1143  const char(*Nops)[11] =
1144  STI->getFeatureBits()[X86::Mode16Bit] ? Nops16Bit : Nops32Bit;
1145 
1146  uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1147 
1148  // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1149  // length.
1150  do {
1151  const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1152  const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1153  for (uint8_t i = 0; i < Prefixes; i++)
1154  OS << '\x66';
1155  const uint8_t Rest = ThisNopLength - Prefixes;
1156  if (Rest != 0)
1157  OS.write(Nops[Rest - 1], Rest);
1158  Count -= ThisNopLength;
1159  } while (Count != 0);
1160 
1161  return true;
1162 }
1163 
1164 /* *** */
1165 
1166 namespace {
1167 
/// Common base for the ELF flavors of the X86 backend; carries the OS ABI
/// byte passed through to the ELF object writers.
class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI; // ELF OSABI identification byte for the object writer.
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
1174 
1175 class ELFX86_32AsmBackend : public ELFX86AsmBackend {
1176 public:
1177  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
1178  const MCSubtargetInfo &STI)
1179  : ELFX86AsmBackend(T, OSABI, STI) {}
1180 
1181  std::unique_ptr<MCObjectTargetWriter>
1182  createObjectTargetWriter() const override {
1183  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
1184  }
1185 };
1186 
1187 class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1188 public:
1189  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1190  const MCSubtargetInfo &STI)
1191  : ELFX86AsmBackend(T, OSABI, STI) {}
1192 
1193  std::unique_ptr<MCObjectTargetWriter>
1194  createObjectTargetWriter() const override {
1195  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1196  ELF::EM_X86_64);
1197  }
1198 };
1199 
1200 class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1201 public:
1202  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1203  const MCSubtargetInfo &STI)
1204  : ELFX86AsmBackend(T, OSABI, STI) {}
1205 
1206  std::unique_ptr<MCObjectTargetWriter>
1207  createObjectTargetWriter() const override {
1208  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1209  ELF::EM_IAMCU);
1210  }
1211 };
1212 
1213 class ELFX86_64AsmBackend : public ELFX86AsmBackend {
1214 public:
1215  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
1216  const MCSubtargetInfo &STI)
1217  : ELFX86AsmBackend(T, OSABI, STI) {}
1218 
1219  std::unique_ptr<MCObjectTargetWriter>
1220  createObjectTargetWriter() const override {
1221  return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
1222  }
1223 };
1224 
/// COFF (Windows) flavor of the X86 backend: selects the Win COFF object
/// writer and recognizes COFF-specific named fixups.
class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit; // True for x86_64 COFF, false for i386.

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit,
                       const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI)
      , Is64Bit(is64Bit) {
  }

  // Map COFF directive fixup names to generic fixup kinds.
  Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
        .Case("dir32", FK_Data_4)
        .Case("secrel32", FK_SecRel_4)
        .Case("secidx", FK_SecRel_2)
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};
1248 
namespace CU {

  /// Compact unwind encoding values.
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU
1274 
1275 class DarwinX86AsmBackend : public X86AsmBackend {
1276  const MCRegisterInfo &MRI;
1277 
1278  /// Number of registers that can be saved in a compact unwind encoding.
1279  enum { CU_NUM_SAVED_REGS = 6 };
1280 
1281  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1282  Triple TT;
1283  bool Is64Bit;
1284 
1285  unsigned OffsetSize; ///< Offset of a "push" instruction.
1286  unsigned MoveInstrSize; ///< Size of a "move" instruction.
1287  unsigned StackDivide; ///< Amount to adjust stack size by.
1288 protected:
1289  /// Size of a "push" instruction for the given register.
1290  unsigned PushInstrSize(unsigned Reg) const {
1291  switch (Reg) {
1292  case X86::EBX:
1293  case X86::ECX:
1294  case X86::EDX:
1295  case X86::EDI:
1296  case X86::ESI:
1297  case X86::EBP:
1298  case X86::RBX:
1299  case X86::RBP:
1300  return 1;
1301  case X86::R12:
1302  case X86::R13:
1303  case X86::R14:
1304  case X86::R15:
1305  return 2;
1306  }
1307  return 1;
1308  }
1309 
1310 private:
1311  /// Get the compact unwind number for a given register. The number
1312  /// corresponds to the enum lists in compact_unwind_encoding.h.
1313  int getCompactUnwindRegNum(unsigned Reg) const {
1314  static const MCPhysReg CU32BitRegs[7] = {
1315  X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1316  };
1317  static const MCPhysReg CU64BitRegs[] = {
1318  X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1319  };
1320  const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1321  for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1322  if (*CURegs == Reg)
1323  return Idx;
1324 
1325  return -1;
1326  }
1327 
1328  /// Return the registers encoded for a compact encoding with a frame
1329  /// pointer.
1330  uint32_t encodeCompactUnwindRegistersWithFrame() const {
1331  // Encode the registers in the order they were saved --- 3-bits per
1332  // register. The list of saved registers is assumed to be in reverse
1333  // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1334  uint32_t RegEnc = 0;
1335  for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1336  unsigned Reg = SavedRegs[i];
1337  if (Reg == 0) break;
1338 
1339  int CURegNum = getCompactUnwindRegNum(Reg);
1340  if (CURegNum == -1) return ~0U;
1341 
1342  // Encode the 3-bit register number in order, skipping over 3-bits for
1343  // each register.
1344  RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1345  }
1346 
1347  assert((RegEnc & 0x3FFFF) == RegEnc &&
1348  "Invalid compact register encoding!");
1349  return RegEnc;
1350  }
1351 
  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  /// NOTE: mutates the mutable SavedRegs member in place (renumber, then
  /// reverse) before computing the 10-bit permutation value.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    // RenumRegs[i] = SavedRegs[i] adjusted down by the number of smaller
    // values that precede it (a Lehmer-style rank of the permutation).
    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] + 2 * RenumRegs[3]
                             + RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] + 2 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] + RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }
1421 
1422 public:
  /// Construct the Darwin backend; derives pointer-width-dependent sizes
  /// used by the compact-unwind encoder from the target triple.
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
        Is64Bit(TT.isArch64Bit()) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;     // Bytes pushed per saved register.
    MoveInstrSize = Is64Bit ? 3 : 2;  // Size of the mov %rsp,%rbp instruction.
    StackDivide = Is64Bit ? 8 : 4;    // Divisor applied to stack sizes.
  }
1432 
  /// Build the Mach-O object writer for this triple's CPU type/subtype.
  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
    return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
  }
1439 
  /// Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  /// Returns 0 when no encoding applies and CU::UNWIND_MODE_DWARF when the
  /// frame must fall back to DWARF unwind info.
  uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    // Offset within the prologue of the stack-subtract instruction
    // (frameless case); starts at the mov-instruction size for this mode.
    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned NumDefCFAOffsets = 0;
    int MinAbsOffset = std::numeric_limits<int>::max();

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
        // Defines a frame pointer. E.g.
        //
        // movq %rsp, %rbp
        // L0:
        // .cfi_def_cfa_register %rbp
        //
        HasFP = true;

        // If the frame pointer is other than esp/rsp, we do not have a way to
        // generate a compact unwinding representation, so bail out.
        if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
            (Is64Bit ? X86::RBP : X86::EBP))
          return 0;

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        MinAbsOffset = std::numeric_limits<int>::max();
        InstrOffset += MoveInstrSize;
        break;
      }
        // Defines a new offset for the CFA. E.g.
        //
        // With frame:
        //
        //  pushq %rbp
        // L0:
        //  .cfi_def_cfa_offset 16
        //
        // Without frame:
        //
        //  subq $72, %rsp
        // L0:
        //  .cfi_def_cfa_offset 80
        //
        StackSize = Inst.getOffset() / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
        // Defines a "push" of a callee-saved register. E.g.
        //
        //  pushq %r15
        //  pushq %r14
        //  pushq %rbx
        // L0:
        //  subq $120, %rsp
        // L1:
        //  .cfi_offset %rbx, -40
        //  .cfi_offset %r14, -32
        //  .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        MinAbsOffset = std::min(MinAbsOffset, abs(Inst.getOffset()));
        InstrOffset += PushInstrSize(Reg);
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // We don't attempt to track a real StackAdjust, so if the saved registers
      // aren't adjacent to rbp we can't cope.
      if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
          RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
1601 };
1602 
1603 } // end anonymous namespace
1604 
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  // Mach-O targets get the Darwin backend (adds compact unwind support).
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  // Windows/COFF targets get the COFF writer; /*Is64Bit=*/false here.
  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  // Intel MCU (IAMCU) uses its own ELF machine type.
  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}
1623 
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  // Mach-O targets get the Darwin backend (adds compact unwind support).
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  // Windows/COFF targets get the COFF writer; /*Is64Bit=*/true here.
  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  // x32: 32-bit pointers on the 64-bit machine type.
  if (TheTriple.isX32())
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::StringSwitch::Case
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:67
llvm::SectionKind::isText
bool isText() const
Definition: SectionKind.h:121
i
i
Definition: README.txt:29
llvm::X86::CS_Encoding
@ CS_Encoding
Definition: X86BaseInfo.h:369
isRightAfterData
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
Definition: X86AsmBackend.cpp:493
MCDwarf.h
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
llvm::X86II::getMemoryOperandNo
int getMemoryOperandNo(uint64_t TSFlags)
The function returns the MCInst operand # for the first field of the memory operand.
Definition: X86BaseInfo.h:1090
llvm::MCFragment::FT_BoundaryAlign
@ FT_BoundaryAlign
Definition: MCFragment.h:46
llvm::MCRelaxableFragment
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:271
Reg
unsigned Reg
Definition: MachineSink.cpp:1566
llvm::MCSymbol
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
llvm::ARM::PredBlockMask::TT
@ TT
llvm::MCCFIInstruction::OpDefCfaRegister
@ OpDefCfaRegister
Definition: MCDwarf.h:465
llvm::MCAsmBackend::getFixupKindInfo
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
Definition: MCAsmBackend.cpp:74
llvm::FK_PCRel_8
@ FK_PCRel_8
A eight-byte pc relative fixup.
Definition: MCFixup.h:31
getCondFromBranch
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
Definition: X86AsmBackend.cpp:322
llvm::Triple::isOSBinFormatCOFF
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:638
llvm::cl::Prefix
@ Prefix
Definition: CommandLine.h:164
T
llvm::cl::location
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:457
llvm::SystemZII::BranchType
BranchType
Definition: SystemZInstrInfo.h:78
llvm::MCCFIInstruction::OpOffset
@ OpOffset
Definition: MCDwarf.h:463
llvm::StringSwitch::Default
LLVM_NODISCARD R Default(T Value)
Definition: StringSwitch.h:181
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1019
MCCodeEmitter.h
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:137
llvm::MCCodeEmitter::emitPrefix
virtual void emitPrefix(const MCInst &Inst, raw_ostream &OS, const MCSubtargetInfo &STI) const
Emit the prefixes of given instruction on the output stream.
Definition: MCCodeEmitter.h:37
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::tgtok::Code
@ Code
Definition: TGLexer.h:50
llvm::FirstTargetFixupKind
@ FirstTargetFixupKind
Definition: MCFixup.h:45
ErrorHandling.h
llvm::X86::AlignBranchRet
@ AlignBranchRet
Definition: X86BaseInfo.h:363
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
MCAssembler.h
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
createMCInstrInfo
static MCInstrInfo * createMCInstrInfo()
Definition: WebAssemblyMCTargetDesc.cpp:43
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:84
llvm::createX86ELFObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Definition: X86ELFObjectWriter.cpp:343
llvm::X86::AlignBranchFused
@ AlignBranchFused
Definition: X86BaseInfo.h:359
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:333
llvm::createX86WinCOFFObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
Definition: X86WinCOFFObjectWriter.cpp:118
llvm::MachO::CPUType
CPUType
Definition: MachO.h:1418
llvm::Optional
Definition: APInt.h:33
MCFixupKindInfo.h
llvm::N86::EDX
@ EDX
Definition: X86MCTargetDesc.h:51
getRelaxedOpcodeArith
static unsigned getRelaxedOpcodeArith(const MCInst &Inst)
Definition: X86AsmBackend.cpp:230
llvm::X86::reloc_riprel_4byte
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::X86::CondCode
CondCode
Definition: X86BaseInfo.h:80
llvm::MCInstrDesc::isIndirectBranch
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MCInstrDesc.h:302
llvm::MCEncodedFragmentWithFixups::getFixups
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:223
llvm::X86::COND_INVALID
@ COND_INVALID
Definition: X86BaseInfo.h:107
llvm::X86II::RawFrmDstSrc
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:607
llvm::detail::DenseSetImpl< ValueT, DenseMap< ValueT, detail::DenseSetEmpty, DenseMapInfo< ValueT >, detail::DenseSetPair< ValueT > >, DenseMapInfo< ValueT > >::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
llvm::MCStreamer::getAllowAutoPadding
bool getAllowAutoPadding() const
Definition: MCStreamer.h:292
llvm::detail::DenseSetImpl< ValueT, DenseMap< ValueT, detail::DenseSetEmpty, DenseMapInfo< ValueT >, detail::DenseSetPair< ValueT > >, DenseMapInfo< ValueT > >::count
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
llvm::MCInst::getNumOperands
unsigned getNumOperands() const
Definition: MCInst.h:208
llvm::Data
@ Data
Definition: SIMachineScheduler.h:55
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
llvm::FK_PCRel_1
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
llvm::MachO::getCPUType
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::X86::reloc_signed_4byte_relax
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
llvm::MCSection::end
iterator end()
Definition: MCSection.h:180
llvm::MCInst::setOpcode
void setOpcode(unsigned Op)
Definition: MCInst.h:197
llvm::ARMBuildAttrs::Section
@ Section
Legacy Tags.
Definition: ARMBuildAttributes.h:78
MCObjectStreamer.h
llvm::FirstLiteralRelocationKind
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
Definition: MCFixup.h:50
llvm::MCFragment
Definition: MCFragment.h:31
CommandLine.h
llvm::FK_Data_4
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:203
llvm::Triple::isOSBinFormatELF
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:633
llvm::MCAsmBackend
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:36
ELF.h
CompactUnwindEncodings
CompactUnwindEncodings
Compact unwind encoding values.
Definition: AArch64AsmBackend.cpp:504
MCAsmBackend.h
llvm::MutableArrayRef< char >
llvm::MCAsmLayout::invalidateFragmentsFrom
void invalidateFragmentsFrom(MCFragment *F)
Invalidate the fragments starting with F because it has been resized.
Definition: MCFragment.cpp:70
llvm::N86::ESP
@ ESP
Definition: X86MCTargetDesc.h:51
llvm::FK_SecRel_4
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
llvm::MCSubtargetInfo::hasFeature
bool hasFeature(unsigned Feature) const
Definition: MCSubtargetInfo.h:118
llvm::support::little
@ little
Definition: Endian.h:27
llvm::StringRef::split
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:749
llvm::X86::AlignBranchCall
@ AlignBranchCall
Definition: X86BaseInfo.h:362
llvm::MCSubtargetInfo::getTargetTriple
const Triple & getTargetTriple() const
Definition: MCSubtargetInfo.h:107
getSizeForInstFragment
static size_t getSizeForInstFragment(const MCFragment *F)
Definition: X86AsmBackend.cpp:521
MCContext.h
llvm::X86II::RawFrmSrc
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:598
getFixupKindSize
static unsigned getFixupKindSize(unsigned Kind)
Definition: X86AsmBackend.cpp:742
MCSectionMachO.h
llvm::X86::AlignBranchJcc
@ AlignBranchJcc
Definition: X86BaseInfo.h:360
MCInstrInfo.h
llvm::MCCFIInstruction::getOffset
int getOffset() const
Definition: MCDwarf.h:642
MCInst.h
llvm::FK_SecRel_2
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
llvm::MCObjectStreamer
Streaming object file generation interface.
Definition: MCObjectStreamer.h:36
llvm::MCRelaxableFragment::setInst
void setInst(const MCInst &Value)
Definition: MCFragment.h:285
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:195
llvm::AArch64::Fixups
Fixups
Definition: AArch64FixupKinds.h:17
MCSubtargetInfo.h
isFirstMacroFusibleInst
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
Definition: X86AsmBackend.cpp:362
llvm::SIInstrFlags::DS
@ DS
Definition: SIDefines.h:59
llvm::MCSubtargetInfo::getFeatureBits
const FeatureBitset & getFeatureBits() const
Definition: MCSubtargetInfo.h:111
llvm::codeview::ProcSymFlags::HasFP
@ HasFP
llvm::Triple::isOSBinFormatMachO
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:646
llvm::createX86_32AsmBackend
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Definition: X86AsmBackend.cpp:1605
llvm::raw_ostream::write
raw_ostream & write(unsigned char C)
Definition: raw_ostream.cpp:220
llvm::MCRelaxableFragment::getInst
const MCInst & getInst() const
Definition: MCFragment.h:284
llvm::MCELFObjectTargetWriter
Definition: MCELFObjectWriter.h:53
llvm::Triple::isOSIAMCU
bool isOSIAMCU() const
Definition: Triple.h:525
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
Options
const char LLVMTargetMachineRef LLVMPassBuilderOptionsRef Options
Definition: PassBuilderBindings.cpp:48
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
MCMachObjectWriter.h
llvm::MCCFIInstruction::getOperation
OpType getOperation() const
Definition: MCDwarf.h:620
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::X86::AlignBranchIndirect
@ AlignBranchIndirect
Definition: X86BaseInfo.h:364
llvm::X86::classifyFirstOpcodeInMacroFusion
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:140
llvm::MCExpr::getKind
ExprKind getKind() const
Definition: MCExpr.h:81
llvm::MCObjectStreamer::getCurrentFragment
MCFragment * getCurrentFragment() const
Definition: MCObjectStreamer.cpp:179
llvm::MachO::getCPUSubType
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Triple::getArch
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:310
llvm::MCStreamer::getCurrentSectionOnly
MCSection * getCurrentSectionOnly() const
Definition: MCStreamer.h:376
llvm::MCCFIInstruction::getRegister
unsigned getRegister() const
Definition: MCDwarf.h:623
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:23
llvm::lltok::Kind
Kind
Definition: LLToken.h:18
llvm::MCCFIInstruction
Definition: MCDwarf.h:457
llvm::X86::reloc_signed_4byte
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
DF
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
llvm::SmallString< 256 >
llvm::X86::NumTargetFixupKinds
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:35
llvm::ARM_PROC::IE
@ IE
Definition: ARMBaseInfo.h:27
llvm::MCAsmLayout::getSectionOrder
llvm::SmallVectorImpl< MCSection * > & getSectionOrder()
Definition: MCAsmLayout.h:69
llvm::isInt< 8 >
constexpr bool isInt< 8 >(int64_t x)
Definition: MathExtras.h:367
llvm::DenseSet
Implements a dense probed hash-table based set.
Definition: DenseSet.h:268
llvm::cl::opt
Definition: CommandLine.h:1432
isPrefix
static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
Definition: X86AsmBackend.cpp:357
llvm::MCAssembler
Definition: MCAssembler.h:60
llvm::X86II::getOperandBias
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:1050
llvm::MCCFIInstruction::OpDefCfaOffset
@ OpDefCfaOffset
Definition: MCDwarf.h:466
uint64_t
llvm::MCInstrDesc::isCall
bool isCall() const
Return true if the instruction is a call.
Definition: MCInstrDesc.h:279
llvm::Triple::getOS
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:319
MCELFObjectWriter.h
llvm::X86::AddrSegmentReg
@ AddrSegmentReg
AddrSegmentReg - The operand # of the segment in the memory operand.
Definition: X86BaseInfo.h:38
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::MCFixupKindInfo::FKF_IsPCRel
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...
Definition: MCFixupKindInfo.h:19
llvm::assumeAligned
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:103
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::MCPhysReg
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:21
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
MCRegisterInfo.h
llvm::MCObjectStreamer::insert
void insert(MCFragment *F)
Definition: MCObjectStreamer.h:78
llvm::MCAsmBackend::getFixupKind
virtual Optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
Definition: MCAsmBackend.cpp:70
llvm::ilist_node_with_parent::getPrevNode
NodeTy * getPrevNode()
Definition: ilist_node.h:274
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
isFullyRelaxed
static bool isFullyRelaxed(const MCRelaxableFragment &RF)
Return true if this instruction has been fully relaxed into it's most general available form.
Definition: X86AsmBackend.cpp:860
llvm::MCFixupKindInfo
Target independent information on a fixup kind.
Definition: MCFixupKindInfo.h:15
llvm::FK_PCRel_2
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
llvm::FK_Data_1
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
llvm::FK_PCRel_4
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
llvm::MCTargetOptions
Definition: MCTargetOptions.h:36
llvm::MCFragment::FT_Align
@ FT_Align
Definition: MCFragment.h:36
llvm::MCBoundaryAlignFragment::setLastFragment
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:604
llvm::FK_NONE
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
llvm::MCELFObjectTargetWriter::getOSABI
uint8_t getOSABI() const
Definition: MCELFObjectWriter.h:101
llvm::X86::reloc_global_offset_table
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
llvm::MCObjectStreamer::getAssembler
MCAssembler & getAssembler()
Definition: MCObjectStreamer.h:112
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::MCSection::getAlignment
unsigned getAlignment() const
Definition: MCSection.h:138
Fixup
PowerPC TLS Dynamic Call Fixup
Definition: PPCTLSDynamicCall.cpp:235
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
getRelaxedOpcodeBranch
static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode)
Definition: X86AsmBackend.cpp:218
llvm::X86II::isPrefix
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:969
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::X86::classifySecondCondCodeInMacroFusion
SecondMacroFusionInstKind classifySecondCondCodeInMacroFusion(X86::CondCode CC)
Definition: X86BaseInfo.h:289
llvm::MCFragment::FT_Data
@ FT_Data
Definition: MCFragment.h:37
llvm::cantFail
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:747
uint32_t
llvm::X86::AlignBranchBoundaryKind
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:357
llvm::X86::reloc_global_offset_table8
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
llvm::MCSection
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:39
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::X86II::RawFrmMemOffs
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:594
llvm::X86::isMacroFused
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:338
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
llvm::FK_SecRel_1
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MCAsmLayout::getFragmentOffset
uint64_t getFragmentOffset(const MCFragment *F) const
Get the offset of the given fragment inside its containing section.
Definition: MCFragment.cpp:96
MCAsmLayout.h
llvm::X86::reloc_riprel_4byte_relax
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
llvm::Triple::isOSWindows
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:548
llvm::MCSection::setAlignment
void setAlignment(Align Value)
Definition: MCSection.h:139
j
return j(j<< 16)
MCObjectWriter.h
llvm::N86::EDI
@ EDI
Definition: X86MCTargetDesc.h:51
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:83
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:25
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:52
llvm::X86::DS_Encoding
@ DS_Encoding
Definition: X86BaseInfo.h:370
uint16_t
llvm::MCAsmLayout
Encapsulates the layout of an assembly file at a particular point in time.
Definition: MCAsmLayout.h:28
llvm::MCCodeEmitter
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::X86::FirstMacroFusionInstKind::Cmp
@ Cmp
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
llvm::Align::value
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
llvm::X86::reloc_riprel_4byte_movq_load
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
llvm::FK_SecRel_8
@ FK_SecRel_8
A eight-byte section relative fixup.
Definition: MCFixup.h:43
llvm::MCEncodedFragment::getSubtargetInfo
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:173
llvm::X86::AlignBranchJmp
@ AlignBranchJmp
Definition: X86BaseInfo.h:361
llvm::TargetStackID::Value
Value
Definition: TargetFrameLowering.h:27
llvm::X86::SS_Encoding
@ SS_Encoding
Definition: X86BaseInfo.h:374
llvm::MCInst::getOpcode
unsigned getOpcode() const
Definition: MCInst.h:198
llvm::createX86MachObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
Definition: X86MachObjectWriter.cpp:603
StringSwitch.h
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::X86::SecondMacroFusionInstKind
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:127
llvm::X86::getSegmentOverridePrefixForReg
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:380
classifySecondInstInMacroFusion
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
Definition: X86AsmBackend.cpp:337
MCValue.h
llvm::MCOperand::isExpr
bool isExpr() const
Definition: MCInst.h:65
llvm::MCFragment::FT_Relaxable
@ FT_Relaxable
Definition: MCFragment.h:41
llvm::MCRelaxableFragment::getAllowAutoPadding
bool getAllowAutoPadding() const
Definition: MCFragment.h:287
llvm::MCSection::getKind
SectionKind getKind() const
Definition: MCSection.h:123
llvm::MCSection::begin
iterator begin()
Definition: MCSection.h:177
llvm::N86::ECX
@ ECX
Definition: X86MCTargetDesc.h:51
llvm::MCFixupKind
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
X86BaseInfo.h
llvm::X86AS::SS
@ SS
Definition: X86.h:189
llvm::MCInstrDesc::isUnconditionalBranch
bool isUnconditionalBranch() const
Return true if this is a branch which always transfers control flow to some other block.
Definition: MCInstrDesc.h:316
llvm::FK_Data_8
@ FK_Data_8
A eight-byte fixup.
Definition: MCFixup.h:26
llvm::X86::FirstMacroFusionInstKind::Invalid
@ Invalid
isRIPRelative
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
Definition: X86AsmBackend.cpp:343
llvm::MCExpr::SymbolRef
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
getRelaxedOpcode
static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode)
Definition: X86AsmBackend.cpp:315
llvm::N86::ESI
@ ESI
Definition: X86MCTargetDesc.h:51
llvm::MCInst::getOperand
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
llvm::Triple::isX32
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:810
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
support
Reimplement select in terms of SEL *We would really like to support but we need to prove that the add doesn t need to overflow between the two bit chunks *Implement pre post increment support(e.g. PR935) *Implement smarter const ant generation for binops with large immediates. A few ARMv6T2 ops should be pattern matched
Definition: README.txt:10
llvm::MCInst::dump_pretty
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:81
llvm::ELF::EM_X86_64
@ EM_X86_64
Definition: ELF.h:179
llvm::raw_svector_ostream
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:658
llvm::X86::AlignBranchNone
@ AlignBranchNone
Definition: X86BaseInfo.h:358
llvm::X86::FirstMacroFusionInstKind
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:111
hasInterruptDelaySlot
static bool hasInterruptDelaySlot(const MCInst &Inst)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
Definition: X86AsmBackend.cpp:473
llvm::StringSwitch
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:42
llvm::HexStyle::Asm
@ Asm
0ffh
Definition: MCInstPrinter.h:34
llvm::ELF::EM_IAMCU
@ EM_IAMCU
Definition: ELF.h:140
llvm::MCInstrDesc::isReturn
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:267
llvm::N86::EBX
@ EBX
Definition: X86MCTargetDesc.h:51
llvm::MCValue
This represents an "assembler immediate".
Definition: MCValue.h:37
llvm::MCSymbolRefExpr::VK_None
@ VK_None
Definition: MCExpr.h:195
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:62
MachO.h
llvm::cl::desc
Definition: CommandLine.h:412
llvm::X86::reloc_riprel_4byte_relax_rex
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
raw_ostream.h
llvm::X86::AddrBaseReg
@ AddrBaseReg
Definition: X86BaseInfo.h:32
llvm::FK_Data_2
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
n
The same transformation can work with an even modulo with the addition of a and shrink the compare RHS by the same amount Unless the target supports that transformation probably isn t worthwhile The transformation can also easily be made to work with non zero equality for n
Definition: README.txt:685
llvm::MCDataFragment
Fragment for data and encoded instructions.
Definition: MCFragment.h:242
llvm::ELF::EM_386
@ EM_386
Definition: ELF.h:137
TargetRegistry.h
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1282
llvm::MCAssembler::isBundlingEnabled
bool isBundlingEnabled() const
Definition: MCAssembler.h:331
MCExpr.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:75
CU
Definition: AArch64AsmBackend.cpp:501
llvm::MCFixup
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:71
llvm::X86::reloc_branch_4byte_pcrel
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:228
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::MCExpr
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
llvm::createX86_64AsmBackend
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Definition: X86AsmBackend.cpp:1624
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:308
X86FixupKinds.h
hasVariantSymbol
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
Definition: X86AsmBackend.cpp:450
llvm::MCEncodedFragmentWithContents::getContents
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:197
llvm::MCFragment::FT_CompactEncodedInst
@ FT_CompactEncodedInst
Definition: MCFragment.h:38
llvm::MCCodeEmitter::encodeInstruction
virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
EncodeInstruction - Encode the given Inst to bytes on the output stream OS.
llvm::X86II::FormMask
@ FormMask
Definition: X86BaseInfo.h:744
llvm::MCOperand::getReg
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
llvm::MCBoundaryAlignFragment
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:579