//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

91 
93  "aarch64-enable-copyelim",
94  cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
95  cl::Hidden);
96 
97 static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
98  cl::desc("Enable the load/store pair"
99  " optimization pass"),
100  cl::init(true), cl::Hidden);
101 
103  "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
104  cl::desc("Run SimplifyCFG after expanding atomic operations"
105  " to make use of cmpxchg flow-based information"),
106  cl::init(true));
107 
static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                   cl::desc("Work around Cortex-A53 erratum 835769"),
                   cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

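// The flags above are hidden llvm::cl options rather than a stable interface.
// As a usage sketch (assuming the standard llc and clang drivers), they are
// typically toggled on the command line like:
//   llc -mtriple=aarch64-linux-gnu -aarch64-enable-ccmp=false foo.ll
//   clang --target=aarch64-linux-gnu -mllvm -aarch64-enable-ldst-opt=false -c foo.c
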
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerLoweringPass(*PR);
  initializeAArch64PostSelectOptimizePass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeSVEIntrinsicOptsPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64SLSHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}


// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128";
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
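
// For illustration (not exhaustive): with the defaults above, a little-endian
// ELF triple such as aarch64-unknown-linux-gnu selects
//   "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
// while requesting the ilp32 ABI switches to the 32-bit pointer layout string.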

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  // We should set CodeModel::Small for Windows ARM64 in JIT mode, since with
  // the large code model LLVM generates 4 MOV instructions per address, and
  // Windows doesn't support relocating such long-branch sequences (4 MOVs).
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}
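
// As a rough example of the checks above (assuming the llc driver):
//   llc -mtriple=aarch64-linux-gnu -code-model=tiny foo.ll    // accepted (ELF)
//   llc -mtriple=arm64-apple-ios -code-model=tiny foo.ll      // fatal error (MachO)
//   llc -mtriple=aarch64-linux-gnu -code-model=medium foo.ll  // fatal error (unsupported)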

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, computeDefaultCPU(TT, CPU), FS, Options,
                        getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (getOptLevel() <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                           isLittle);
  }
  return I.get();
}
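
// A sketch of what drives the cache above: an IR function such as
//   define void @f() #0 { ... }
//   attributes #0 = { "target-cpu"="cortex-a75" "target-features"="+fullfp16" }
// shares this TargetMachine with attribute-free functions but gets its own
// AArch64Subtarget, cached under the concatenated CPU and feature strings.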

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts && TM->getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOpt::None));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardCheckPass());
}
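
// To see where the IR passes configured above land relative to the generic
// codegen pipeline, one option (assuming the legacy pass manager driver) is:
//   llc -mtriple=aarch64-linux-gnu -debug-pass=Structure -o /dev/null foo.ll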

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAArch64PreLegalizerCombiner(IsOptNone));
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  if (!IsOptNone)
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  addPass(createAArch64IndirectThunks());
  addPass(createAArch64SLSHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  // Identify valid longjmp targets for Windows Control Flow Guard.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardLongjmpPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());

  // SVE bundles move prefixes with destructive operations.
  addPass(createUnpackMachineBundles(nullptr));
}
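
// When debugging one of the machine passes configured above, the MIR stop
// points can be handy; e.g. (assuming the load/store optimizer's registered
// name "aarch64-ldst-opt" in this build):
//   llc -mtriple=aarch64-linux-gnu -stop-after=aarch64-ldst-opt foo.ll -o foo.mir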

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI =
      reinterpret_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}