InstrBuilder.cpp (LLVM 9.0.0svn)
//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/InstrBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
                           const llvm::MCInstrInfo &mcii,
                           const llvm::MCRegisterInfo &mri,
                           const llvm::MCInstrAnalysis *mcia)
    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
      FirstReturnInst(true) {
  const MCSchedModel &SM = STI.getSchedModel();
  ProcResourceMasks.resize(SM.getNumProcResourceKinds());
  computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
}

static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource
  // is part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
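  // As interpreted here: a negative BufferSize means the resource is buffered
  // (out-of-order issue is possible), a BufferSize of 0 means instructions
  // must issue to the resource as they dispatch (a dispatch hazard), and a
  // BufferSize of 1 effectively forces in-order issue on that resource.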
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    } else {
      Buffers.setBit(PRE->ProcResourceIdx);
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);
    }

    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });
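  // A hypothetical example: say unit P0 has mask 0b0001, unit P1 has mask
  // 0b0010, and group P01 has mask 0b0111 (its own bit 0b0100 plus the bits
  // of its units). Sorting by popcount orders the worklist P0, P1, P01, so
  // cycles consumed by individual units are visited before the groups that
  // contain them.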

  uint64_t UsedResourceUnits = 0;
  uint64_t UsedResourceGroups = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      assert(countPopulation(A.first) > 1 && "Expected a group!");
      UsedResourceGroups |= PowerOf2Floor(A.first);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
      UsedResourceGroups |= (A.first ^ NormalizedMask);
    }

    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }
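  // Continuing the hypothetical example: if P0 consumes 2 cycles and group
  // P01 lists 3 cycles, the inner loop subtracts P0's 2 cycles from P01
  // (P0's mask is fully contained in P01's normalized mask), leaving 1 cycle
  // that the group may spend on any of its units.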

  ID.UsedProcResUnits = UsedResourceUnits;
  ID.UsedProcResGroups = UsedResourceGroups;

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }
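  // In the Haswell example above, HWPort01 ends up reserved: after removing
  // the leading group bit, every unit left in its mask (HWPort0 and HWPort1)
  // is already present in UsedResourceUnits, so the group has no unit that
  // could serve it independently.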

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
        continue;

      uint64_t Mask = ProcResourceMasks[I];
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
        Buffers.setBit(I);
    }
  }

  // Now set the buffers.
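  // Note: ID.Buffers is filled back to front below, so buffered resources end
  // up stored in decreasing processor resource index order.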
  if (unsigned NumBuffers = Buffers.countPopulation()) {
    ID.Buffers.resize(NumBuffers);
    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
      if (Buffers[I]) {
        --NumBuffers;
        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
      }
    }
  }

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tResource Mask=" << format_hex(R.first, 16) << ", "
             << "Reserved=" << R.second.isReserved() << ", "
             << "#Units=" << R.second.NumUnits << ", "
             << "cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
    dbgs() << "\t\t Used Units=" << format_hex(ID.UsedProcResUnits, 16) << '\n';
    dbgs() << "\t\tUsed Groups=" << format_hex(ID.UsedProcResGroups, 16) << '\n';
  });
}

static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}

static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
  // Count register definitions, and skip non-register operands in the process.
  unsigned I, E;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
    const MCOperand &Op = MCI.getOperand(I);
    if (Op.isReg())
      --NumExplicitDefs;
  }

  if (NumExplicitDefs) {
    return make_error<InstructionError<MCInst>>(
        "Expected more register operand definitions.", MCI);
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand.
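    // (An example of an optional definition is the condition-code register
    // implicitly written by ARM instructions when the 's' bit is set.)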
    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
    if (I == MCI.getNumOperands() || !Op.isReg()) {
      std::string Message =
          "expected a register operand for an optional definition. Instruction "
          "has not been correctly analyzed.";
      return make_error<InstructionError<MCInst>>(Message, MCI);
    }
  }

  return ErrorSuccess();
}

void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  // 1. The number of explicit and implicit register definitions in a MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  // 2. Uses start at index #(MCDesc.getNumDefs()).
  // 3. There can only be a single optional register definition, and it is
  //    always the last operand of the sequence (excluding extra operands
  //    contributed by variadic opcodes).
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32 {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                            @  <MCOperand Reg:59>
  //                            @  <MCOperand Imm:0>     (!!)
  //                            @  <MCOperand Reg:67>
  //                            @  <MCOperand Imm:0>
  //                            @  <MCOperand Imm:14>
  //                            @  <MCOperand Reg:0>>
  //
  // MCDesc reports:
  //  6 explicit operands.
  //  1 optional definition
  //  2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To work around this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions. The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2, register reads start at #(NumExplicitDefs-1).
  // That means, register R1 from the example is both read and written.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;

  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
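    // Implicit writes are identified by the bitwise NOT of the implicit
    // definition index, so that isImplicitWrite() can distinguish them from
    // explicit operand indices.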
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = MCDesc.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (!NumVariadicOps)
    return;

  // FIXME: If an instruction opcode is flagged 'mayStore' (but not 'mayLoad'),
  // and it has no 'unmodeledSideEffects', then this logic optimistically
  // assumes that any extra register operands in the variadic sequence are not
  // register definitions.
  //
  // Otherwise, we conservatively assume that any register operand from the
  // variadic sequence is both a register read and a register write.
  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
    ++CurrentDef;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  ID.Writes.resize(CurrentDef);
}

void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
    --NumExplicitUses;
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
       ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
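  // For example, an opcode with three explicit uses and two implicit uses
  // gets UseIndex values 0..2 for the explicit reads, and 3..4 for the
  // implicit reads.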
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    Read.OpIndex = ~I;
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.getImplicitUses()[I];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');
  }

  CurrentUse += NumImplicitUses;

  // FIXME: If an instruction opcode is marked as 'mayLoad' (but not
  // 'mayStore'), and it has no 'unmodeledSideEffects', then this logic
  // optimistically assumes that any extra register operands in the variadic
  // sequence are not register uses.

  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  ID.Reads.resize(CurrentUse);
}

Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
                                    const MCInst &MCI) const {
  if (ID.NumMicroOps != 0)
    return ErrorSuccess();

  bool UsesMemory = ID.MayLoad || ID.MayStore;
  bool UsesBuffers = !ID.Buffers.empty();
  bool UsesResources = !ID.Resources.empty();
  if (!UsesMemory && !UsesBuffers && !UsesResources)
    return ErrorSuccess();

  StringRef Message;
  if (UsesMemory) {
    Message = "found an inconsistent instruction that decodes "
              "into zero opcodes and that consumes load/store "
              "unit resources.";
  } else {
    Message = "found an inconsistent instruction that decodes "
              "to zero opcodes and that consumes scheduler "
              "resources.";
  }

  return make_error<InstructionError<MCInst>>(Message, MCI);
}

Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  if (IsVariant) {
    unsigned CPUID = SM.getProcessorID();
    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

    if (!SchedClassID) {
      return make_error<InstructionError<MCInst>>(
          "unable to resolve scheduling class for write variant.", MCI);
    }
  }

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence.",
        MCI);
  }

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;
  ID->SchedClassID = SchedClassID;

  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
    FirstCallInst = false;
  }

  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
  ID->BeginGroup = SCDesc.BeginGroup;
  ID->EndGroup = SCDesc.EndGroup;

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);

  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Sanity check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor.
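  // Descriptors of plain opcodes are cached by opcode and shared by every
  // instance of that opcode. Variant and variadic opcodes are cached by
  // MCInst instead, since their resolved information may differ from one
  // instance to another.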
  bool IsVariadic = MCDesc.isVariadic();
  if (!IsVariadic && !IsVariant) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];
  }

  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
}

Expected<const InstrDesc &>
InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
    return *Descriptors[MCI.getOpcode()];

  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
    return *VariantDescriptors[&MCI];

  return createInstrDescImpl(MCI);
}

Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
  if (!DescOrErr)
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Check if this is a dependency breaking instruction.
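  // A zero idiom (for example, an XOR of a register with itself on x86)
  // always produces zero, so it breaks the dependency on its input operands;
  // Mask describes which input reads are independent of previous writes.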
  APInt Mask;

  bool IsZeroIdiom = false;
  bool IsDepBreaking = false;
  if (MCIA) {
    unsigned ProcID = STI.getSchedModel().getProcessorID();
    IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
    IsDepBreaking =
        IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
    if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
      NewIS->setOptimizableMove();
  }

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (!RD.isImplicitRead()) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(RD, RegID);
    ReadState &RS = NewIS->getUses().back();

    if (IsDepBreaking) {
      // A mask of all zeroes means: explicit input operands are not
      // independent.
      if (Mask.isNullValue()) {
        if (!RD.isImplicitRead())
          RS.setIndependentFromDef();
      } else {
        // Check if this register operand is independent according to `Mask`.
        // Note that Mask may not have enough bits to describe all explicit and
        // implicit input operands. If this register operand doesn't have a
        // corresponding bit in Mask, then conservatively assume that it is
        // dependent.
        if (Mask.getBitWidth() > RD.UseIndex) {
          // Okay. This mask describes register use `RD.UseIndex`.
          if (Mask[RD.UseIndex])
            RS.setIndependentFromDef();
        }
      }
    }
  }

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return std::move(NewIS);

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  if (MCIA)
    MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
                                          : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(WD, RegID,
                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
                                  /* WritesZero */ IsZeroIdiom);
    ++WriteIndex;
  }

  return std::move(NewIS);
}
} // namespace mca
} // namespace llvm